// SPDX-License-Identifier: GPL-2.0-only
/*
 * sleep.c - ACPI sleep support.
 *
 * Copyright (c) 2005 Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>
 * Copyright (c) 2004 David Shaohua Li <shaohua.li@intel.com>
 * Copyright (c) 2000-2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 */

#define pr_fmt(fmt) "ACPI: PM: " fmt

#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/dmi.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/suspend.h>
#include <linux/reboot.h>
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/syscore_ops.h>
#include <trace/events/power.h>

#include "internal.h"
#include "sleep.h"

/*
 * Some HW-full platforms do not have _S5, so they may need
 * to leverage efi power off for a shutdown.
 */
bool acpi_no_s5;
static u8 sleep_states[ACPI_S_STATE_COUNT];

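/*
 * _TTS (Transition To State) tells the platform which sleep state the system
 * is about to enter; it is evaluated with the target state as the argument
 * (and with S0 again on the way back to the working state).
 */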
static void acpi_sleep_tts_switch(u32 acpi_state)
{
	acpi_status status;

	status = acpi_execute_simple_method(NULL, "\\_TTS", acpi_state);
	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
		/*
		 * OS can't evaluate the _TTS object correctly. Some warning
		 * message will be printed. But it won't break anything.
		 */
		pr_notice("Failure in evaluating _TTS object\n");
	}
}

static int tts_notify_reboot(struct notifier_block *this,
			unsigned long code, void *x)
{
	acpi_sleep_tts_switch(ACPI_STATE_S5);
	return NOTIFY_DONE;
}

static struct notifier_block tts_notifier = {
	.notifier_call	= tts_notify_reboot,
	.next		= NULL,
	.priority	= 0,
};

#ifndef acpi_skip_set_wakeup_address
#define acpi_skip_set_wakeup_address() false
#endif

static int acpi_sleep_prepare(u32 acpi_state)
{
#ifdef CONFIG_ACPI_SLEEP
	unsigned long acpi_wakeup_address;

	/* do we have a wakeup address for S2 and S3? */
	if (acpi_state == ACPI_STATE_S3 && !acpi_skip_set_wakeup_address()) {
		acpi_wakeup_address = acpi_get_wakeup_address();
		if (!acpi_wakeup_address)
			return -EFAULT;
		acpi_set_waking_vector(acpi_wakeup_address);
	}
#endif
	pr_info("Preparing to enter system sleep state S%d\n", acpi_state);
	acpi_enable_wakeup_devices(acpi_state);
	acpi_enter_sleep_state_prep(acpi_state);
	return 0;
}

bool acpi_sleep_state_supported(u8 sleep_state)
{
	acpi_status status;
	u8 type_a, type_b;

	status = acpi_get_sleep_type_data(sleep_state, &type_a, &type_b);
	return ACPI_SUCCESS(status) && (!acpi_gbl_reduced_hardware
		|| (acpi_gbl_FADT.sleep_control.address
			&& acpi_gbl_FADT.sleep_status.address));
}

#ifdef CONFIG_ACPI_SLEEP
static u32 acpi_target_sleep_state = ACPI_STATE_S0;

u32 acpi_target_system_state(void)
{
	return acpi_target_sleep_state;
}
EXPORT_SYMBOL_GPL(acpi_target_system_state);

static bool pwr_btn_event_pending;

/*
 * The ACPI specification wants us to save NVS memory regions during hibernation
 * and to restore them during the subsequent resume. Windows does that also for
 * suspend to RAM. However, it is known that this mechanism does not work on
 * all machines, so we allow the user to disable it with the help of the
 * 'acpi_sleep=nonvs' kernel command line option.
 */
static bool nvs_nosave;

void __init acpi_nvs_nosave(void)
{
	nvs_nosave = true;
}

/*
 * The ACPI specification wants us to save NVS memory regions during hibernation
 * but says nothing about saving NVS during S3. Not all versions of Windows
 * save NVS on S3 suspend either, and it is clear that not all systems need
 * NVS to be saved at S3 time. To improve suspend/resume time, allow the
 * user to disable saving NVS on S3 if their system does not require it, but
 * continue to save/restore NVS for S4 as specified.
 */
static bool nvs_nosave_s3;

void __init acpi_nvs_nosave_s3(void)
{
	nvs_nosave_s3 = true;
}

static int __init init_nvs_save_s3(const struct dmi_system_id *d)
{
	nvs_nosave_s3 = false;
	return 0;
}

/*
 * ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the
 * user to request that behavior by using the 'acpi_old_suspend_ordering'
 * kernel command line option that causes the following variable to be set.
 */
static bool old_suspend_ordering;

void __init acpi_old_suspend_ordering(void)
{
	old_suspend_ordering = true;
}

static int __init init_old_suspend_ordering(const struct dmi_system_id *d)
{
	acpi_old_suspend_ordering();
	return 0;
}

static int __init init_nvs_nosave(const struct dmi_system_id *d)
{
	acpi_nvs_nosave();
	return 0;
}

bool acpi_sleep_default_s3;

static int __init init_default_s3(const struct dmi_system_id *d)
{
	acpi_sleep_default_s3 = true;
	return 0;
}

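/*
 * Machines that need one of the quirks above (old _PTS ordering, different
 * NVS save/restore behaviour, or defaulting to S3 instead of s2idle), keyed
 * by DMI data.
 */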
static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
	{
	.callback = init_old_suspend_ordering,
	.ident = "Abit KN9 (nForce4 variant)",
	.matches = {
		DMI_MATCH(DMI_BOARD_VENDOR, "http://www.abit.com.tw/"),
		DMI_MATCH(DMI_BOARD_NAME, "KN9 Series(NF-CK804)"),
		},
	},
	{
	.callback = init_old_suspend_ordering,
	.ident = "HP xw4600 Workstation",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
		DMI_MATCH(DMI_PRODUCT_NAME, "HP xw4600 Workstation"),
		},
	},
	{
	.callback = init_old_suspend_ordering,
	.ident = "Asus Pundit P1-AH2 (M2N8L motherboard)",
	.matches = {
		DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTek Computer INC."),
		DMI_MATCH(DMI_BOARD_NAME, "M2N8L"),
		},
	},
	{
	.callback = init_old_suspend_ordering,
	.ident = "Panasonic CF51-2L",
	.matches = {
		DMI_MATCH(DMI_BOARD_VENDOR,
				"Matsushita Electric Industrial Co.,Ltd."),
		DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"),
		},
	},
	{
	.callback = init_nvs_nosave,
	.ident = "Sony Vaio VGN-FW41E_H",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
		DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW41E_H"),
		},
	},
	{
	.callback = init_nvs_nosave,
	.ident = "Sony Vaio VGN-FW21E",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
		DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW21E"),
		},
	},
	{
	.callback = init_nvs_nosave,
	.ident = "Sony Vaio VGN-FW21M",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
		DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW21M"),
		},
	},
	{
	.callback = init_nvs_nosave,
	.ident = "Sony Vaio VPCEB17FX",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
		DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB17FX"),
		},
	},
	{
	.callback = init_nvs_nosave,
	.ident = "Sony Vaio VGN-SR11M",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
		DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR11M"),
		},
	},
	{
	.callback = init_nvs_nosave,
	.ident = "Everex StepNote Series",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "Everex Systems, Inc."),
		DMI_MATCH(DMI_PRODUCT_NAME, "Everex StepNote Series"),
		},
	},
	{
	.callback = init_nvs_nosave,
	.ident = "Sony Vaio VPCEB1Z1E",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
		DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB1Z1E"),
		},
	},
	{
	.callback = init_nvs_nosave,
	.ident = "Sony Vaio VGN-NW130D",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
		DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NW130D"),
		},
	},
	{
	.callback = init_nvs_nosave,
	.ident = "Sony Vaio VPCCW29FX",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
		DMI_MATCH(DMI_PRODUCT_NAME, "VPCCW29FX"),
		},
	},
	{
	.callback = init_nvs_nosave,
	.ident = "Averatec AV1020-ED2",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "AVERATEC"),
		DMI_MATCH(DMI_PRODUCT_NAME, "1000 Series"),
		},
	},
	{
	.callback = init_old_suspend_ordering,
	.ident = "Asus A8N-SLI DELUXE",
	.matches = {
		DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
		DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI DELUXE"),
		},
	},
	{
	.callback = init_old_suspend_ordering,
	.ident = "Asus A8N-SLI Premium",
	.matches = {
		DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
		DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI Premium"),
		},
	},
	{
	.callback = init_nvs_nosave,
	.ident = "Sony Vaio VGN-SR26GN_P",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
		DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR26GN_P"),
		},
	},
	{
	.callback = init_nvs_nosave,
	.ident = "Sony Vaio VPCEB1S1E",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
		DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB1S1E"),
		},
	},
	{
	.callback = init_nvs_nosave,
	.ident = "Sony Vaio VGN-FW520F",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
		DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW520F"),
		},
	},
	{
	.callback = init_nvs_nosave,
	.ident = "Asus K54C",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
		DMI_MATCH(DMI_PRODUCT_NAME, "K54C"),
		},
	},
	{
	.callback = init_nvs_nosave,
	.ident = "Asus K54HR",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
		DMI_MATCH(DMI_PRODUCT_NAME, "K54HR"),
		},
	},
	{
	.callback = init_nvs_save_s3,
	.ident = "Asus 1025C",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
		DMI_MATCH(DMI_PRODUCT_NAME, "1025C"),
		},
	},
	/*
	 * The ASUS ROG M16 from 2023 has many events which wake it from s2idle
	 * resulting in excessive battery drain and risk of laptop overheating,
	 * these events can be caused by the MMC or the AniMe display if installed.
	 * The match is valid for all of the GU604V<x> range.
	 */
	{
	.callback = init_default_s3,
	.ident = "ASUS ROG Zephyrus M16 (2023)",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
		DMI_MATCH(DMI_PRODUCT_NAME, "ROG Zephyrus M16 GU604V"),
		},
	},
	/*
	 * https://bugzilla.kernel.org/show_bug.cgi?id=189431
	 * Lenovo G50-45 is a platform later than 2012, but needs nvs memory
	 * saving during S3.
	 */
	{
	.callback = init_nvs_save_s3,
	.ident = "Lenovo G50-45",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
		DMI_MATCH(DMI_PRODUCT_NAME, "80E3"),
		},
	},
	{
	.callback = init_nvs_save_s3,
	.ident = "Lenovo G40-45",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
		DMI_MATCH(DMI_PRODUCT_NAME, "80E1"),
		},
	},
	/*
	 * ThinkPad X1 Tablet(2016) cannot do suspend-to-idle using
	 * the Low Power S0 Idle firmware interface (see
	 * https://bugzilla.kernel.org/show_bug.cgi?id=199057).
	 */
	{
	.callback = init_default_s3,
	.ident = "ThinkPad X1 Tablet(2016)",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
		DMI_MATCH(DMI_PRODUCT_NAME, "20GGA00L00"),
		},
	},
	{},
};

static bool ignore_blacklist;

void __init acpi_sleep_no_blacklist(void)
{
	ignore_blacklist = true;
}

static void __init acpi_sleep_dmi_check(void)
{
	if (ignore_blacklist)
		return;

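	/*
	 * Firmware from 2012 onward is assumed not to need NVS saving over S3;
	 * the acpisleep_dmi_table entries that use init_nvs_save_s3 opt
	 * individual machines back in where that assumption is wrong.
	 */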
	if (dmi_get_bios_year() >= 2012)
		acpi_nvs_nosave_s3();

	dmi_check_system(acpisleep_dmi_table);
}

/**
 * acpi_pm_freeze - Disable the GPEs and suspend EC transactions.
 */
static int acpi_pm_freeze(void)
{
	acpi_disable_all_gpes();
	acpi_os_wait_events_complete();
	acpi_ec_block_transactions();
	return 0;
}

/**
 * acpi_pm_pre_suspend - Enable wakeup devices, "freeze" EC and save NVS.
 */
static int acpi_pm_pre_suspend(void)
{
	acpi_pm_freeze();
	return suspend_nvs_save();
}

/**
 * __acpi_pm_prepare - Prepare the platform to enter the target state.
 *
 * If necessary, set the firmware waking vector and do arch-specific
 * nastiness to get the wakeup code to the waking vector.
 */
static int __acpi_pm_prepare(void)
{
	int error = acpi_sleep_prepare(acpi_target_sleep_state);

	if (error)
		acpi_target_sleep_state = ACPI_STATE_S0;

	return error;
}

/**
 * acpi_pm_prepare - Prepare the platform to enter the target sleep
 *	state and disable the GPEs.
 */
static int acpi_pm_prepare(void)
{
	int error = __acpi_pm_prepare();

	if (!error)
		error = acpi_pm_pre_suspend();

	return error;
}

/**
 * acpi_pm_finish - Instruct the platform to leave a sleep state.
 *
 * This is called after we wake back up (or if entering the sleep state
 * failed).
 */
static void acpi_pm_finish(void)
{
	struct acpi_device *pwr_btn_adev;
	u32 acpi_state = acpi_target_sleep_state;

	acpi_ec_unblock_transactions();
	suspend_nvs_free();

	if (acpi_state == ACPI_STATE_S0)
		return;

	pr_info("Waking up from system sleep state S%d\n", acpi_state);
	acpi_disable_wakeup_devices(acpi_state);
	acpi_leave_sleep_state(acpi_state);

	/* reset firmware waking vector */
	acpi_set_waking_vector(0);

	acpi_target_sleep_state = ACPI_STATE_S0;

	acpi_resume_power_resources();

	/* If we were woken with the fixed power button, provide a small
	 * hint to userspace in the form of a wakeup event on the fixed power
	 * button device (if it can be found).
	 *
	 * We delay the event generation til now, as the PM layer requires
	 * timekeeping to be running before we generate events. */
	if (!pwr_btn_event_pending)
		return;

	pwr_btn_event_pending = false;
	pwr_btn_adev = acpi_dev_get_first_match_dev(ACPI_BUTTON_HID_POWERF,
						    NULL, -1);
	if (pwr_btn_adev) {
		pm_wakeup_event(&pwr_btn_adev->dev, 0);
		acpi_dev_put(pwr_btn_adev);
	}
}

/**
 * acpi_pm_start - Start system PM transition.
 * @acpi_state: The target ACPI power state to transition to.
 */
static void acpi_pm_start(u32 acpi_state)
{
	acpi_target_sleep_state = acpi_state;
	acpi_sleep_tts_switch(acpi_target_sleep_state);
	acpi_scan_lock_acquire();
}

/**
 * acpi_pm_end - Finish up system PM transition.
 */
static void acpi_pm_end(void)
{
	acpi_turn_off_unused_power_resources();
	acpi_scan_lock_release();
	/*
	 * This is necessary in case acpi_pm_finish() is not called during a
	 * failing transition to a sleep state.
	 */
	acpi_target_sleep_state = ACPI_STATE_S0;
	acpi_sleep_tts_switch(acpi_target_sleep_state);
}

#else /* !CONFIG_ACPI_SLEEP */
#define sleep_no_lps0 (1)
#define acpi_target_sleep_state	ACPI_STATE_S0
#define acpi_sleep_default_s3	(1)
static inline void acpi_sleep_dmi_check(void) {}
#endif /* CONFIG_ACPI_SLEEP */

#ifdef CONFIG_SUSPEND
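/* Map each suspend_state_t value from the PM core to the ACPI S-state used to enter it. */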
static u32 acpi_suspend_states[] = {
	[PM_SUSPEND_ON] = ACPI_STATE_S0,
	[PM_SUSPEND_STANDBY] = ACPI_STATE_S1,
	[PM_SUSPEND_MEM] = ACPI_STATE_S3,
	[PM_SUSPEND_MAX] = ACPI_STATE_S5
};

/**
 * acpi_suspend_begin - Set the target system sleep state to the state
 *	associated with given @pm_state, if supported.
 * @pm_state: The target system power management state.
 */
static int acpi_suspend_begin(suspend_state_t pm_state)
{
	u32 acpi_state = acpi_suspend_states[pm_state];
	int error;

	error = (nvs_nosave || nvs_nosave_s3) ? 0 : suspend_nvs_alloc();
	if (error)
		return error;

	if (!sleep_states[acpi_state]) {
		pr_err("ACPI does not support sleep state S%u\n", acpi_state);
		return -ENOSYS;
	}
	if (acpi_state > ACPI_STATE_S1)
		pm_set_suspend_via_firmware();

	acpi_pm_start(acpi_state);
	return 0;
}

/**
 * acpi_suspend_enter - Actually enter a sleep state.
 * @pm_state: ignored
 *
 * Flush caches and go to sleep. For STR we have to call arch-specific
 * assembly, which in turn calls acpi_enter_sleep_state().
 * It's unfortunate, but it works. Please fix if you're feeling frisky.
 */
static int acpi_suspend_enter(suspend_state_t pm_state)
{
	acpi_status status = AE_OK;
	u32 acpi_state = acpi_target_sleep_state;
	int error;

	trace_suspend_resume(TPS("acpi_suspend"), acpi_state, true);
	switch (acpi_state) {
	case ACPI_STATE_S1:
		barrier();
		status = acpi_enter_sleep_state(acpi_state);
		break;

	case ACPI_STATE_S3:
		if (!acpi_suspend_lowlevel)
			return -ENOSYS;
		error = acpi_suspend_lowlevel();
		if (error)
			return error;
		pr_info("Low-level resume complete\n");
		pm_set_resume_via_firmware();
		break;
	}
	trace_suspend_resume(TPS("acpi_suspend"), acpi_state, false);

	/* This violates the spec but is required for bug compatibility. */
	acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);

	/* Reprogram control registers */
	acpi_leave_sleep_state_prep(acpi_state);

	/* ACPI 3.0 specs (P62) says that it's the responsibility
	 * of the OSPM to clear the status bit [ implying that the
	 * POWER_BUTTON event should not reach userspace ]
	 *
	 * However, we do generate a small hint for userspace in the form of
	 * a wakeup event. We flag this condition for now and generate the
	 * event later, as we're currently too early in resume to be able to
	 * generate wakeup events.
	 */
	if (ACPI_SUCCESS(status) && (acpi_state == ACPI_STATE_S3)) {
		acpi_event_status pwr_btn_status = ACPI_EVENT_FLAG_DISABLED;

		acpi_get_event_status(ACPI_EVENT_POWER_BUTTON, &pwr_btn_status);

		if (pwr_btn_status & ACPI_EVENT_FLAG_STATUS_SET) {
			acpi_clear_event(ACPI_EVENT_POWER_BUTTON);
			/* Flag for later */
			pwr_btn_event_pending = true;
		}
	}

	/*
	 * Disable all GPE and clear their status bits before interrupts are
	 * enabled. Some GPEs (like wakeup GPEs) have no handlers and this can
	 * prevent them from producing spurious interrupts.
	 *
	 * acpi_leave_sleep_state() will reenable specific GPEs later.
	 *
	 * Because this code runs on one CPU with disabled interrupts (all of
	 * the other CPUs are offline at this time), it need not acquire any
	 * sleeping locks which may trigger an implicit preemption point even
	 * if there is no contention, so avoid doing that by using a low-level
	 * library routine here.
	 */
	acpi_hw_disable_all_gpes();
	/* Allow EC transactions to happen. */
	acpi_ec_unblock_transactions();

	suspend_nvs_restore();

	return ACPI_SUCCESS(status) ? 0 : -EFAULT;
}

static int acpi_suspend_state_valid(suspend_state_t pm_state)
{
	u32 acpi_state;

	switch (pm_state) {
	case PM_SUSPEND_ON:
	case PM_SUSPEND_STANDBY:
	case PM_SUSPEND_MEM:
		acpi_state = acpi_suspend_states[pm_state];

		return sleep_states[acpi_state];
	default:
		return 0;
	}
}

static const struct platform_suspend_ops acpi_suspend_ops = {
	.valid = acpi_suspend_state_valid,
	.begin = acpi_suspend_begin,
	.prepare_late = acpi_pm_prepare,
	.enter = acpi_suspend_enter,
	.wake = acpi_pm_finish,
	.end = acpi_pm_end,
};

/**
 * acpi_suspend_begin_old - Set the target system sleep state to the
 *	state associated with given @pm_state, if supported, and
 *	execute the _PTS control method. This function is used if the
 *	pre-ACPI 2.0 suspend ordering has been requested.
 * @pm_state: The target suspend state for the system.
 */
static int acpi_suspend_begin_old(suspend_state_t pm_state)
{
	int error = acpi_suspend_begin(pm_state);

	if (!error)
		error = __acpi_pm_prepare();

	return error;
}

/*
 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
 * been requested.
 */
static const struct platform_suspend_ops acpi_suspend_ops_old = {
	.valid = acpi_suspend_state_valid,
	.begin = acpi_suspend_begin_old,
	.prepare_late = acpi_pm_pre_suspend,
	.enter = acpi_suspend_enter,
	.wake = acpi_pm_finish,
	.end = acpi_pm_end,
	.recover = acpi_pm_finish,
};

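/*
 * Set while the system is suspended to idle; other ACPI code (e.g. the EC
 * driver) can query it through acpi_s2idle_wakeup().
 */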
static bool s2idle_wakeup;

int acpi_s2idle_begin(void)
{
	acpi_scan_lock_acquire();
	return 0;
}

int acpi_s2idle_prepare(void)
{
	if (acpi_sci_irq_valid()) {
		int error;

		error = enable_irq_wake(acpi_sci_irq);
		if (error)
			pr_warn("Warning: Failed to enable wakeup from IRQ %d: %d\n",
				acpi_sci_irq, error);

		acpi_ec_set_gpe_wake_mask(ACPI_GPE_ENABLE);
	}

	acpi_enable_wakeup_devices(ACPI_STATE_S0);

	/* Change the configuration of GPEs to avoid spurious wakeup. */
	acpi_enable_all_wakeup_gpes();
	acpi_os_wait_events_complete();

	s2idle_wakeup = true;
	return 0;
}

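/*
 * Decide whether a wakeup event detected while suspended to idle warrants a
 * full resume: return true to wake up, or false to let the system go back to
 * sleep after the ACPI SCI has been handled and rearmed.
 */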
bool acpi_s2idle_wake(void)
{
	if (!acpi_sci_irq_valid())
		return pm_wakeup_pending();

	while (pm_wakeup_pending()) {
		/*
		 * If IRQD_WAKEUP_ARMED is set for the SCI at this point, the
		 * SCI has not triggered while suspended, so bail out (the
		 * wakeup is pending anyway and the SCI is not the source of
		 * it).
		 */
		if (irqd_is_wakeup_armed(irq_get_irq_data(acpi_sci_irq))) {
			pm_pr_dbg("Wakeup unrelated to ACPI SCI\n");
			return true;
		}

		/*
		 * If the status bit of any enabled fixed event is set, the
		 * wakeup is regarded as valid.
		 */
		if (acpi_any_fixed_event_status_set()) {
			pm_pr_dbg("ACPI fixed event wakeup\n");
			return true;
		}

		/* Check wakeups from drivers sharing the SCI. */
		if (acpi_check_wakeup_handlers()) {
			pm_pr_dbg("ACPI custom handler wakeup\n");
			return true;
		}

		/*
		 * Check non-EC GPE wakeups and if there are none, cancel the
		 * SCI-related wakeup and dispatch the EC GPE.
		 */
		if (acpi_ec_dispatch_gpe()) {
			pm_pr_dbg("ACPI non-EC GPE wakeup\n");
			return true;
		}

		acpi_os_wait_events_complete();

		/*
		 * The SCI is in the "suspended" state now and it cannot produce
		 * new wakeup events till the rearming below, so if any of them
		 * are pending here, they must be resulting from the processing
		 * of EC events above or coming from somewhere else.
		 */
		if (pm_wakeup_pending()) {
			pm_pr_dbg("Wakeup after ACPI Notify sync\n");
			return true;
		}

		pm_pr_dbg("Rearming ACPI SCI for wakeup\n");

		pm_wakeup_clear(acpi_sci_irq);
		rearm_wake_irq(acpi_sci_irq);
	}

	return false;
}

void acpi_s2idle_restore(void)
{
	/*
	 * Drain pending events before restoring the working-state configuration
	 * of GPEs:
	 */
	acpi_os_wait_events_complete(); /* synchronize GPE processing */
	acpi_ec_flush_work(); /* flush the EC driver's workqueues */
	acpi_os_wait_events_complete(); /* synchronize Notify handling */

	s2idle_wakeup = false;

	acpi_enable_all_runtime_gpes();

	acpi_disable_wakeup_devices(ACPI_STATE_S0);

	if (acpi_sci_irq_valid()) {
		acpi_ec_set_gpe_wake_mask(ACPI_GPE_DISABLE);
		disable_irq_wake(acpi_sci_irq);
	}
}

void acpi_s2idle_end(void)
{
	acpi_scan_lock_release();
}

static const struct platform_s2idle_ops acpi_s2idle_ops = {
	.begin = acpi_s2idle_begin,
	.prepare = acpi_s2idle_prepare,
	.wake = acpi_s2idle_wake,
	.restore = acpi_s2idle_restore,
	.end = acpi_s2idle_end,
};

void __weak acpi_s2idle_setup(void)
{
	if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)
		pr_info("Efficient low-power S0 idle declared\n");

	s2idle_set_ops(&acpi_s2idle_ops);
}

static void __init acpi_sleep_suspend_setup(void)
{
	bool suspend_ops_needed = false;
	int i;

	for (i = ACPI_STATE_S1; i < ACPI_STATE_S4; i++)
		if (acpi_sleep_state_supported(i)) {
			sleep_states[i] = 1;
			suspend_ops_needed = true;
		}

	if (suspend_ops_needed)
		suspend_set_ops(old_suspend_ordering ?
				&acpi_suspend_ops_old : &acpi_suspend_ops);

	acpi_s2idle_setup();
}

#else /* !CONFIG_SUSPEND */
#define s2idle_wakeup	(false)
static inline void acpi_sleep_suspend_setup(void) {}
#endif /* !CONFIG_SUSPEND */

bool acpi_s2idle_wakeup(void)
{
	return s2idle_wakeup;
}

#ifdef CONFIG_PM_SLEEP
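/*
 * PM1 BM_RLD controls whether bus-master requests can take the CPU out of C3.
 * Firmware may leave the bit in a different state across a sleep transition,
 * so save it at suspend time and restore it on resume if it changed.
 */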
static u32 saved_bm_rld;

static int acpi_save_bm_rld(void)
{
	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &saved_bm_rld);
	return 0;
}

static void acpi_restore_bm_rld(void)
{
	u32 resumed_bm_rld = 0;

	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &resumed_bm_rld);
	if (resumed_bm_rld == saved_bm_rld)
		return;

	acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld);
}

static struct syscore_ops acpi_sleep_syscore_ops = {
	.suspend = acpi_save_bm_rld,
	.resume = acpi_restore_bm_rld,
};

static void acpi_sleep_syscore_init(void)
{
	register_syscore_ops(&acpi_sleep_syscore_ops);
}
#else
static inline void acpi_sleep_syscore_init(void) {}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_HIBERNATION
static unsigned long s4_hardware_signature;
static struct acpi_table_facs *facs;

int acpi_check_s4_hw_signature = -1; /* Default behaviour is just to warn */

static int acpi_hibernation_begin(pm_message_t stage)
{
	if (!nvs_nosave) {
		int error = suspend_nvs_alloc();

		if (error)
			return error;
	}

	if (stage.event == PM_EVENT_HIBERNATE)
		pm_set_suspend_via_firmware();

	acpi_pm_start(ACPI_STATE_S4);
	return 0;
}

static int acpi_hibernation_enter(void)
{
	acpi_status status = AE_OK;

	/* This shouldn't return. If it returns, we have a problem */
	status = acpi_enter_sleep_state(ACPI_STATE_S4);
	/* Reprogram control registers */
	acpi_leave_sleep_state_prep(ACPI_STATE_S4);

	return ACPI_SUCCESS(status) ? 0 : -EFAULT;
}

static void acpi_hibernation_leave(void)
{
	pm_set_resume_via_firmware();
	/*
	 * If ACPI is not enabled by the BIOS and the boot kernel, we need to
	 * enable it here.
	 */
	acpi_enable();
	/* Reprogram control registers */
	acpi_leave_sleep_state_prep(ACPI_STATE_S4);
	/* Check the hardware signature */
	if (facs && s4_hardware_signature != facs->hardware_signature)
		pr_crit("Hardware changed while hibernated, success doubtful!\n");
	/* Restore the NVS memory area */
	suspend_nvs_restore();
	/* Allow EC transactions to happen. */
	acpi_ec_unblock_transactions();
}

static void acpi_pm_thaw(void)
{
	acpi_ec_unblock_transactions();
	acpi_enable_all_runtime_gpes();
}

static const struct platform_hibernation_ops acpi_hibernation_ops = {
	.begin = acpi_hibernation_begin,
	.end = acpi_pm_end,
	.pre_snapshot = acpi_pm_prepare,
	.finish = acpi_pm_finish,
	.prepare = acpi_pm_prepare,
	.enter = acpi_hibernation_enter,
	.leave = acpi_hibernation_leave,
	.pre_restore = acpi_pm_freeze,
	.restore_cleanup = acpi_pm_thaw,
};

/**
 * acpi_hibernation_begin_old - Set the target system sleep state to
 *	ACPI_STATE_S4 and execute the _PTS control method. This
 *	function is used if the pre-ACPI 2.0 suspend ordering has been
 *	requested.
 * @stage: The power management event message.
 */
static int acpi_hibernation_begin_old(pm_message_t stage)
{
	int error;
	/*
	 * The _TTS object should always be evaluated before the _PTS object.
	 * When old_suspend_ordering is true, the _PTS object is evaluated
	 * in acpi_sleep_prepare().
	 */
	acpi_sleep_tts_switch(ACPI_STATE_S4);

	error = acpi_sleep_prepare(ACPI_STATE_S4);
	if (error)
		return error;

	if (!nvs_nosave) {
		error = suspend_nvs_alloc();
		if (error)
			return error;
	}

	if (stage.event == PM_EVENT_HIBERNATE)
		pm_set_suspend_via_firmware();

	acpi_target_sleep_state = ACPI_STATE_S4;
	acpi_scan_lock_acquire();
	return 0;
}

/*
 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
 * been requested.
 */
static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
	.begin = acpi_hibernation_begin_old,
	.end = acpi_pm_end,
	.pre_snapshot = acpi_pm_pre_suspend,
	.prepare = acpi_pm_freeze,
	.finish = acpi_pm_finish,
	.enter = acpi_hibernation_enter,
	.leave = acpi_hibernation_leave,
	.pre_restore = acpi_pm_freeze,
	.restore_cleanup = acpi_pm_thaw,
	.recover = acpi_pm_finish,
};

static void acpi_sleep_hibernate_setup(void)
{
	if (!acpi_sleep_state_supported(ACPI_STATE_S4))
		return;

	hibernation_set_ops(old_suspend_ordering ?
			&acpi_hibernation_ops_old : &acpi_hibernation_ops);
	sleep_states[ACPI_STATE_S4] = 1;
	if (!acpi_check_s4_hw_signature)
		return;

	acpi_get_table(ACPI_SIG_FACS, 1, (struct acpi_table_header **)&facs);
	if (facs) {
		/*
		 * s4_hardware_signature is the local variable which is just
		 * used to warn about mismatch after we're attempting to
		 * resume (in violation of the ACPI specification.)
		 */
		s4_hardware_signature = facs->hardware_signature;

		if (acpi_check_s4_hw_signature > 0) {
			/*
			 * If we're actually obeying the ACPI specification
			 * then the signature is written out as part of the
			 * swsusp header, in order to allow the boot kernel
			 * to gracefully decline to resume.
			 */
			swsusp_hardware_signature = facs->hardware_signature;
		}
	}
}

#else /* !CONFIG_HIBERNATION */
static inline void acpi_sleep_hibernate_setup(void) {}
#endif /* !CONFIG_HIBERNATION */

static int acpi_power_off_prepare(struct sys_off_data *data)
{
	/* Prepare to power off the system */
	acpi_sleep_prepare(ACPI_STATE_S5);
	acpi_disable_all_gpes();
	acpi_os_wait_events_complete();
	return NOTIFY_DONE;
}

static int acpi_power_off(struct sys_off_data *data)
{
	/* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */
	pr_debug("%s called\n", __func__);
	local_irq_disable();
	acpi_enter_sleep_state(ACPI_STATE_S5);
	return NOTIFY_DONE;
}

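/*
 * Probe which sleep states the platform supports, register the suspend,
 * hibernation and power-off handlers accordingly, and log the result.
 */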
int __init acpi_sleep_init(void)
{
	char supported[ACPI_S_STATE_COUNT * 3 + 1];
	char *pos = supported;
	int i;

	acpi_sleep_dmi_check();

	sleep_states[ACPI_STATE_S0] = 1;

	acpi_sleep_syscore_init();
	acpi_sleep_suspend_setup();
	acpi_sleep_hibernate_setup();

	if (acpi_sleep_state_supported(ACPI_STATE_S5)) {
		sleep_states[ACPI_STATE_S5] = 1;

		register_sys_off_handler(SYS_OFF_MODE_POWER_OFF_PREPARE,
					 SYS_OFF_PRIO_FIRMWARE,
					 acpi_power_off_prepare, NULL);

		register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
					 SYS_OFF_PRIO_FIRMWARE,
					 acpi_power_off, NULL);

		/*
		 * Windows uses S5 for reboot, so some BIOSes depend on it to
		 * perform proper reboot.
		 */
		register_sys_off_handler(SYS_OFF_MODE_RESTART_PREPARE,
					 SYS_OFF_PRIO_FIRMWARE,
					 acpi_power_off_prepare, NULL);
	} else {
		acpi_no_s5 = true;
	}

	supported[0] = 0;

	for (i = 0; i < ACPI_S_STATE_COUNT; i++) {
		if (sleep_states[i])
			pos += sprintf(pos, " S%d", i);
	}
	pr_info("(supports%s)\n", supported);

	/*
	 * Register the tts_notifier to reboot notifier list so that the _TTS
	 * object can also be evaluated when the system enters S5.
	 */
	register_reboot_notifier(&tts_notifier);
	return 0;
}