// SPDX-License-Identifier: GPL-2.0-only
/*
 * Hibernate support specific for ARM64
 *
 * Derived from work on ARM hibernation support by:
 *
 * Ubuntu project, hibernation support for mach-dove
 * Copyright (C) 2010 Nokia Corporation (Hiroshi Doyu)
 * Copyright (C) 2010 Texas Instruments, Inc. (Teerth Reddy et al.)
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 */
#define pr_fmt(x) "hibernate: " x
#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <linux/pm.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/utsname.h>

#include <asm/barrier.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/daifflags.h>
#include <asm/irqflags.h>
#include <asm/kexec.h>
#include <asm/memory.h>
#include <asm/mmu_context.h>
#include <asm/mte.h>
#include <asm/sections.h>
#include <asm/smp.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>
#include <asm/sysreg.h>
#include <asm/trans_pgd.h>
#include <asm/virt.h>
/*
 * Hibernate core relies on this value being 0 on resume, and marks it
 * __nosavedata assuming it will keep the resume kernel's '0' value. This
 * doesn't happen with KASLR.
 *
 * defined as "__visible int in_suspend __nosavedata" in
 * kernel/power/hibernate.c
 */
extern int in_suspend;
/* Do we need to reset el2? */
#define el2_reset_needed() (is_hyp_nvhe())
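/*
 * Note: with nVHE the kernel runs at EL1 and EL2 only holds the hyp-stub (or
 * KVM's vectors), none of which is part of the hibernate image, so EL2 has to
 * be re-initialised on resume. With VHE the kernel itself runs at EL2 and that
 * state is restored along with the rest of the image.
 */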
/* hyp-stub vectors, used to restore el2 during resume from hibernate. */
extern char __hyp_stub_vectors[];
/*
 * The logical cpu number we should resume on, initialised to a non-cpu
 * number.
 */
static int sleep_cpu = -EINVAL;
/*
 * Values that may not change over hibernate/resume. We put the build number
 * and date in here so that we guarantee not to resume with a different
 * configuration.
 */
struct arch_hibernate_hdr_invariants {
	char	uts_version[__NEW_UTS_LEN + 1];
};
/* These values need to be known across a hibernate/restore. */
static struct arch_hibernate_hdr {
	struct arch_hibernate_hdr_invariants invariants;

	/* These are needed to find the relocated kernel if built with kaslr */
	phys_addr_t	ttbr1_el1;
	void		(*reenter_kernel)(void);

	/*
	 * We need to know where the __hyp_stub_vectors are after restore to
	 * re-configure el2.
	 */
	phys_addr_t	__hyp_stub_vectors;

	u64		sleep_cpu_mpidr;
} resume_hdr;
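/*
 * resume_hdr is filled in from the hibernate image by
 * arch_hibernation_header_restore() and consumed by swsusp_arch_resume()
 * when jumping back into the hibernated kernel.
 */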
static inline void arch_hdr_invariants(struct arch_hibernate_hdr_invariants *i)
{
	memset(i, 0, sizeof(*i));
	memcpy(i->uts_version, init_utsname()->version, sizeof(i->uts_version));
}
int pfn_is_nosave(unsigned long pfn)
{
	unsigned long nosave_begin_pfn = sym_to_pfn(&__nosave_begin);
	unsigned long nosave_end_pfn = sym_to_pfn(&__nosave_end - 1);

	return ((pfn >= nosave_begin_pfn) && (pfn <= nosave_end_pfn)) ||
		crash_is_nosave(pfn);
}
void notrace save_processor_state(void)
{
}

void notrace restore_processor_state(void)
{
}
int arch_hibernation_header_save(void *addr, unsigned int max_size)
{
	struct arch_hibernate_hdr *hdr = addr;

	if (max_size < sizeof(*hdr))
		return -EOVERFLOW;

	arch_hdr_invariants(&hdr->invariants);
	hdr->ttbr1_el1		= __pa_symbol(swapper_pg_dir);
	hdr->reenter_kernel	= _cpu_resume;

	/* We can't use __hyp_get_vectors() because kvm may still be loaded */
	if (el2_reset_needed())
		hdr->__hyp_stub_vectors = __pa_symbol(__hyp_stub_vectors);
	else
		hdr->__hyp_stub_vectors = 0;

	/* Save the mpidr of the cpu we called cpu_suspend() on... */
	if (sleep_cpu < 0) {
		pr_err("Failing to hibernate on an unknown CPU.\n");
		return -ENODEV;
	}
	hdr->sleep_cpu_mpidr = cpu_logical_map(sleep_cpu);
	pr_info("Hibernating on CPU %d [mpidr:0x%llx]\n", sleep_cpu,
		hdr->sleep_cpu_mpidr);

	return 0;
}
EXPORT_SYMBOL(arch_hibernation_header_save);
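/*
 * The header saved above is written into the hibernate image by the hibernate
 * core; the kernel that boots to perform the restore reads it back through
 * arch_hibernation_header_restore() below and checks that it matches.
 */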
int arch_hibernation_header_restore(void *addr)
{
	int ret;
	struct arch_hibernate_hdr_invariants invariants;
	struct arch_hibernate_hdr *hdr = addr;

	arch_hdr_invariants(&invariants);
	if (memcmp(&hdr->invariants, &invariants, sizeof(invariants))) {
		pr_crit("Hibernate image not generated by this kernel!\n");
		return -EINVAL;
	}

	sleep_cpu = get_logical_index(hdr->sleep_cpu_mpidr);
	pr_info("Hibernated on CPU %d [mpidr:0x%llx]\n", sleep_cpu,
		hdr->sleep_cpu_mpidr);
	if (sleep_cpu < 0) {
		pr_crit("Hibernated on a CPU not known to this kernel!\n");
		sleep_cpu = -EINVAL;
		return -EINVAL;
	}

	ret = bringup_hibernate_cpu(sleep_cpu);
	if (ret) {
		sleep_cpu = -EINVAL;
		return ret;
	}

	resume_hdr = *hdr;

	return 0;
}
EXPORT_SYMBOL(arch_hibernation_header_restore);
static void *hibernate_page_alloc(void *arg)
{
	return (void *)get_safe_page((__force gfp_t)(unsigned long)arg);
}
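/*
 * Pages from get_safe_page() are guaranteed by the hibernate core not to
 * collide with any page of the image being restored, so page tables and the
 * relocated exit code built from them survive the copy-back.
 */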
/*
 * Copies length bytes, starting at src_start, into a new page, performs
 * cache maintenance, then maps the page at a low (ttbr0) address as
 * executable.
 *
 * This is used by hibernate to copy the code it needs to execute when
 * overwriting the kernel text. This function generates a new set of page
 * tables, which it loads into ttbr0.
 *
 * Length is provided as we probably only want 4K of data, even on a 64K
 * page system.
 */
static int create_safe_exec_page(void *src_start, size_t length,
				 phys_addr_t *phys_dst_addr)
{
	struct trans_pgd_info trans_info = {
		.trans_alloc_page	= hibernate_page_alloc,
		.trans_alloc_arg	= (__force void *)GFP_ATOMIC,
	};

	void *page = (void *)get_safe_page(GFP_ATOMIC);
	phys_addr_t trans_ttbr0;
	unsigned long t0sz;
	int rc;

	if (!page)
		return -ENOMEM;

	memcpy(page, src_start, length);
	caches_clean_inval_pou((unsigned long)page, (unsigned long)page + length);
	rc = trans_pgd_idmap_page(&trans_info, &trans_ttbr0, &t0sz, page);
	if (rc)
		return rc;

	cpu_install_ttbr0(trans_ttbr0, t0sz);
	*phys_dst_addr = virt_to_phys(page);

	return 0;
}
#ifdef CONFIG_ARM64_MTE

static DEFINE_XARRAY(mte_pages);
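/* mte_pages maps a tagged page's pfn to a buffer holding its saved MTE tags. */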
static int save_tags(struct page *page, unsigned long pfn)
{
	void *tag_storage, *ret;

	tag_storage = mte_allocate_tag_storage();
	if (!tag_storage)
		return -ENOMEM;

	mte_save_page_tags(page_address(page), tag_storage);

	ret = xa_store(&mte_pages, pfn, tag_storage, GFP_KERNEL);
	if (WARN(xa_is_err(ret), "Failed to store MTE tags")) {
		mte_free_tag_storage(tag_storage);
		return xa_err(ret);
	} else if (WARN(ret, "swsusp: %s: Duplicate entry", __func__)) {
		mte_free_tag_storage(ret);
	}

	return 0;
}
static void swsusp_mte_free_storage(void)
{
	XA_STATE(xa_state, &mte_pages, 0);
	void *tags;

	xa_lock(&mte_pages);
	xas_for_each(&xa_state, tags, ULONG_MAX) {
		mte_free_tag_storage(tags);
	}
	xa_unlock(&mte_pages);

	xa_destroy(&mte_pages);
}
static int swsusp_mte_save_tags(void)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;
	int ret = 0;
	int n = 0;

	if (!system_supports_mte())
		return 0;

	for_each_populated_zone(zone) {
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
			struct page *page = pfn_to_online_page(pfn);
			struct folio *folio;

			if (!page)
				continue;

			folio = page_folio(page);

			if (folio_test_hugetlb(folio) &&
			    !folio_test_hugetlb_mte_tagged(folio))
				continue;

			if (!page_mte_tagged(page))
				continue;

			ret = save_tags(page, pfn);
			if (ret) {
				swsusp_mte_free_storage();
				goto out;
			}

			n++;
		}
	}
	pr_info("Saved %d MTE pages\n", n);

out:
	return ret;
}
static void swsusp_mte_restore_tags(void)
{
	XA_STATE(xa_state, &mte_pages, 0);
	int n = 0;
	void *tags;

	xa_lock(&mte_pages);
	xas_for_each(&xa_state, tags, ULONG_MAX) {
		unsigned long pfn = xa_state.xa_index;
		struct page *page = pfn_to_online_page(pfn);

		mte_restore_page_tags(page_address(page), tags);

		mte_free_tag_storage(tags);
		n++;
	}
	xa_unlock(&mte_pages);

	pr_info("Restored %d MTE pages\n", n);

	xa_destroy(&mte_pages);
}

#else	/* CONFIG_ARM64_MTE */

static int swsusp_mte_save_tags(void)
{
	return 0;
}

static void swsusp_mte_restore_tags(void)
{
}

#endif	/* CONFIG_ARM64_MTE */
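/*
 * Note: __cpu_suspend_enter() is reached twice: it returns non-zero when first
 * called on the suspend path (so the image is saved below), and zero when the
 * saved context is re-entered via _cpu_resume on the resume path (so caches
 * are cleaned, MTE tags restored and the hibernate core told the memory is
 * back).
 */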
int swsusp_arch_suspend(void)
{
	int ret = 0;
	unsigned long flags;
	struct sleep_stack_data state;

	if (cpus_are_stuck_in_kernel()) {
		pr_err("Can't hibernate: no mechanism to offline secondary CPUs.\n");
		return -EBUSY;
	}

	flags = local_daif_save();

	if (__cpu_suspend_enter(&state)) {
		/* make the crash dump kernel image visible/saveable */
		crash_prepare_suspend();

		ret = swsusp_mte_save_tags();
		if (ret)
			return ret;

		sleep_cpu = smp_processor_id();
		ret = swsusp_save();
	} else {
		/* Clean kernel core startup/idle code to PoC */
		dcache_clean_inval_poc((unsigned long)__mmuoff_data_start,
				       (unsigned long)__mmuoff_data_end);
		dcache_clean_inval_poc((unsigned long)__idmap_text_start,
				       (unsigned long)__idmap_text_end);

		/* Clean kvm setup code to PoC? */
		if (el2_reset_needed()) {
			dcache_clean_inval_poc(
				(unsigned long)__hyp_idmap_text_start,
				(unsigned long)__hyp_idmap_text_end);
			dcache_clean_inval_poc((unsigned long)__hyp_text_start,
					       (unsigned long)__hyp_text_end);
		}

		swsusp_mte_restore_tags();

		/* make the crash dump kernel image protected again */
		crash_post_resume();

		/*
		 * Tell the hibernation core that we've just restored
		 * the memory
		 */
		in_suspend = 0;

		sleep_cpu = -EINVAL;
		__cpu_suspend_exit();

		/*
		 * Just in case the boot kernel did turn the SSBD
		 * mitigation off behind our back, let's set the state
		 * to what we expect it to be.
		 */
		spectre_v4_enable_mitigation(NULL);
	}

	local_daif_restore(flags);

	return ret;
}
/*
 * Set up, then resume from the hibernate image using swsusp_arch_suspend_exit().
 *
 * Memory allocated by get_safe_page() will be dealt with by the hibernate code;
 * we don't need to free it here.
 */
int swsusp_arch_resume(void)
{
	int rc;
	void *zero_page;
	size_t exit_size;
	pgd_t *tmp_pg_dir;
	phys_addr_t el2_vectors;
	void __noreturn (*hibernate_exit)(phys_addr_t, phys_addr_t, void *,
					  void *, phys_addr_t, phys_addr_t);
	struct trans_pgd_info trans_info = {
		.trans_alloc_page	= hibernate_page_alloc,
		.trans_alloc_arg	= (__force void *)GFP_ATOMIC,
	};

	/*
	 * Restoring the memory image will overwrite the ttbr1 page tables.
	 * Create a second copy of just the linear map, and use this when
	 * restoring.
	 */
	rc = trans_pgd_create_copy(&trans_info, &tmp_pg_dir, PAGE_OFFSET,
				   PAGE_END);
	if (rc)
		return rc;

	/*
	 * We need a zero page that is zero before & after resume in order
	 * to break before make on the ttbr1 page tables.
	 */
	zero_page = (void *)get_safe_page(GFP_ATOMIC);
	if (!zero_page) {
		pr_err("Failed to allocate zero page.\n");
		return -ENOMEM;
	}

	if (el2_reset_needed()) {
		rc = trans_pgd_copy_el2_vectors(&trans_info, &el2_vectors);
		if (rc) {
			pr_err("Failed to setup el2 vectors\n");
			return rc;
		}
	}

	exit_size = __hibernate_exit_text_end - __hibernate_exit_text_start;
	/*
	 * Copy swsusp_arch_suspend_exit() to a safe page. This will generate
	 * a new set of ttbr0 page tables and load them.
	 */
	rc = create_safe_exec_page(__hibernate_exit_text_start, exit_size,
				   (phys_addr_t *)&hibernate_exit);
	if (rc) {
		pr_err("Failed to create safe executable page for hibernate_exit code.\n");
		return rc;
	}

	/*
	 * KASLR will cause the el2 vectors to be in a different location in
	 * the resumed kernel. Load hibernate's temporary copy into el2.
	 *
	 * We can skip this step if we booted at EL1, or are running with VHE.
	 */
	if (el2_reset_needed())
		__hyp_set_vectors(el2_vectors);

	hibernate_exit(virt_to_phys(tmp_pg_dir), resume_hdr.ttbr1_el1,
		       resume_hdr.reenter_kernel, restore_pblist,
		       resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page));
}
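/*
 * Called by the hibernate core while it disables non-boot CPUs, so that the
 * image is restored on the same physical CPU (same MPIDR) that created it;
 * sleep_cpu was recovered from the image header above.
 */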
int hibernate_resume_nonboot_cpu_disable(void)
{
	if (sleep_cpu < 0) {
		pr_err("Failing to resume from hibernate on an unknown CPU.\n");
		return -ENODEV;
	}
	return freeze_secondary_cpus(sleep_cpu);
}