// SPDX-License-Identifier: GPL-2.0-only
/*
 * Hibernation support for x86-64
 *
 * Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (c) 2002 Pavel Machek <pavel@ucw.cz>
 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
 */
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/suspend.h>
#include <linux/scatterlist.h>
#include <linux/kdebug.h>
#include <linux/pgtable.h>

#include <crypto/hash.h>

#include <asm/e820/api.h>
#include <asm/init.h>
#include <asm/proto.h>
#include <asm/page.h>
#include <asm/sections.h>
#include <asm/suspend.h>
#include <asm/tlbflush.h>
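
/*
 * Set up a temporary kernel text mapping in @pgd covering the image
 * kernel's entry point, using page-table pages obtained from
 * get_safe_page() so that the mapping stays intact while the image
 * pages are copied back.
 */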
static int set_up_temporary_text_mapping(pgd_t *pgd)
{
	pmd_t *pmd;
	pud_t *pud;
	p4d_t *p4d = NULL;
	pgprot_t pgtable_prot = __pgprot(_KERNPG_TABLE);
	pgprot_t pmd_text_prot = __pgprot(__PAGE_KERNEL_LARGE_EXEC);

	/* Filter out unsupported __PAGE_KERNEL* bits: */
	pgprot_val(pmd_text_prot) &= __default_kernel_pte_mask;
	pgprot_val(pgtable_prot) &= __default_kernel_pte_mask;

	/*
	 * The new mapping only has to cover the page containing the image
	 * kernel's entry point (jump_address_phys), because the switch over to
	 * it is carried out by relocated code running from a page allocated
	 * specifically for this purpose and covered by the identity mapping, so
	 * the temporary kernel text mapping is only needed for the final jump.
	 * Moreover, in that mapping the virtual address of the image kernel's
	 * entry point must be the same as its virtual address in the image
	 * kernel (restore_jump_address), so the image kernel's
	 * restore_registers() code doesn't find itself in a different area of
	 * the virtual address space after switching over to the original page
	 * tables used by the image kernel.
	 */

	if (pgtable_l5_enabled()) {
		p4d = (p4d_t *)get_safe_page(GFP_ATOMIC);
		if (!p4d)
			return -ENOMEM;
	}

	pud = (pud_t *)get_safe_page(GFP_ATOMIC);
	if (!pud)
		return -ENOMEM;

	pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
	if (!pmd)
		return -ENOMEM;
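
	/*
	 * Map the large page containing jump_address_phys at the virtual
	 * address of the image kernel's entry point (restore_jump_address).
	 */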
	set_pmd(pmd + pmd_index(restore_jump_address),
		__pmd((jump_address_phys & PMD_MASK) | pgprot_val(pmd_text_prot)));
	set_pud(pud + pud_index(restore_jump_address),
		__pud(__pa(pmd) | pgprot_val(pgtable_prot)));
	if (p4d) {
		p4d_t new_p4d = __p4d(__pa(pud) | pgprot_val(pgtable_prot));
		pgd_t new_pgd = __pgd(__pa(p4d) | pgprot_val(pgtable_prot));

		set_p4d(p4d + p4d_index(restore_jump_address), new_p4d);
		set_pgd(pgd + pgd_index(restore_jump_address), new_pgd);
	} else {
		/* No p4d for 4-level paging: point the pgd to the pud page table */
		pgd_t new_pgd = __pgd(__pa(pud) | pgprot_val(pgtable_prot));

		set_pgd(pgd + pgd_index(restore_jump_address), new_pgd);
	}

	return 0;
}
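
/*
 * Page-table allocation callback used by kernel_ident_mapping_init().
 * get_safe_page() returns pages that will not be overwritten while the
 * hibernation image is being restored.
 */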
static void *alloc_pgt_page(void *context)
{
	return (void *)get_safe_page(GFP_ATOMIC);
}
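
/*
 * Build the temporary page tables used during resume: the kernel text
 * mapping required for the final jump into the image kernel plus a
 * direct mapping of all pfn_mapped memory ranges.
 */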
static int set_up_temporary_mappings(void)
{
	struct x86_mapping_info info = {
		.alloc_pgt_page	= alloc_pgt_page,
		.page_flag	= __PAGE_KERNEL_LARGE_EXEC,
		.offset		= __PAGE_OFFSET,
	};
	unsigned long mstart, mend;
	pgd_t *pgd;
	int result;
	int i;

	pgd = (pgd_t *)get_safe_page(GFP_ATOMIC);
	if (!pgd)
		return -ENOMEM;

	/* Prepare a temporary mapping for the kernel text */
	result = set_up_temporary_text_mapping(pgd);
	if (result)
		return result;

	/* Set up the direct mapping from scratch */
	for (i = 0; i < nr_pfn_mapped; i++) {
		mstart = pfn_mapped[i].start << PAGE_SHIFT;
		mend   = pfn_mapped[i].end << PAGE_SHIFT;

		result = kernel_ident_mapping_init(&info, pgd, mstart, mend);
		if (result)
			return result;
	}
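
	/*
	 * Publish the temporary page tables for the low-level restore code,
	 * which loads them into CR3 before copying the image pages back.
	 */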
	temp_pgt = __pa(pgd);

	return 0;
}
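
/*
 * Prepare resume from hibernation: build the temporary page tables, copy
 * the restore code to a safe page and hand control to it via
 * restore_image().
 */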
asmlinkage int swsusp_arch_resume(void)
{
	int error;

	/* We have got enough memory and from now on we cannot recover */
	error = set_up_temporary_mappings();
	if (error)
		return error;

	error = relocate_restore_code();
	if (error)
		return error;

	restore_image();

	return 0;
}