/*
 * Hibernation support for x86-64
 *
 * Distribute under GPLv2
 *
 * Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (c) 2002 Pavel Machek <pavel@ucw.cz>
 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
 */
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/suspend.h>
#include <linux/scatterlist.h>
#include <linux/kdebug.h>
#include <linux/cpu.h>

#include <crypto/hash.h>

#include <asm/e820/api.h>
#include <asm/init.h>
#include <asm/proto.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/suspend.h>
#include <asm/tlbflush.h>
/* Defined in hibernate_asm_64.S */
extern asmlinkage __visible int restore_image(void);

/*
 * Address to jump to in the last phase of restore in order to get to the image
 * kernel's text (this value is passed in the image header).
 */
unsigned long restore_jump_address __visible;
unsigned long jump_address_phys;

/*
 * Value of the cr3 register from before the hibernation (this value is passed
 * in the image header).
 */
unsigned long restore_cr3 __visible;
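/*
 * Physical address of the temporary page tables built by
 * set_up_temporary_mappings(); the restore assembly code switches CR3 to
 * this value before it starts copying image pages into place.
 */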
unsigned long temp_level4_pgt __visible;
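/*
 * Address of the safe page that core_restore_code is copied to by
 * relocate_restore_code(), so the restore code keeps running while the
 * original kernel text is being overwritten with image pages.
 */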
unsigned long relocated_restore_code __visible;
static int set_up_temporary_text_mapping(pgd_t *pgd)
{
	pmd_t *pmd;
	pud_t *pud;
	p4d_t *p4d = NULL;
	pgprot_t pgtable_prot = __pgprot(_KERNPG_TABLE);
	pgprot_t pmd_text_prot = __pgprot(__PAGE_KERNEL_LARGE_EXEC);

	/* Filter out unsupported __PAGE_KERNEL* bits: */
	pgprot_val(pmd_text_prot) &= __default_kernel_pte_mask;
	pgprot_val(pgtable_prot) &= __default_kernel_pte_mask;
	/*
	 * The new mapping only has to cover the page containing the image
	 * kernel's entry point (jump_address_phys), because the switch over to
	 * it is carried out by relocated code running from a page allocated
	 * specifically for this purpose and covered by the identity mapping, so
	 * the temporary kernel text mapping is only needed for the final jump.
	 * Moreover, in that mapping the virtual address of the image kernel's
	 * entry point must be the same as its virtual address in the image
	 * kernel (restore_jump_address), so the image kernel's
	 * restore_registers() code doesn't find itself in a different area of
	 * the virtual address space after switching over to the original page
	 * tables used by the image kernel.
	 */
	if (pgtable_l5_enabled()) {
		p4d = (p4d_t *)get_safe_page(GFP_ATOMIC);
		if (!p4d)
			return -ENOMEM;
	}

	pud = (pud_t *)get_safe_page(GFP_ATOMIC);
	if (!pud)
		return -ENOMEM;

	pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
	if (!pmd)
		return -ENOMEM;

	set_pmd(pmd + pmd_index(restore_jump_address),
		__pmd((jump_address_phys & PMD_MASK) | pgprot_val(pmd_text_prot)));
	set_pud(pud + pud_index(restore_jump_address),
		__pud(__pa(pmd) | pgprot_val(pgtable_prot)));
	if (p4d) {
		p4d_t new_p4d = __p4d(__pa(pud) | pgprot_val(pgtable_prot));
		pgd_t new_pgd = __pgd(__pa(p4d) | pgprot_val(pgtable_prot));

		set_p4d(p4d + p4d_index(restore_jump_address), new_p4d);
		set_pgd(pgd + pgd_index(restore_jump_address), new_pgd);
	} else {
		/* No p4d for 4-level paging: point the pgd to the pud page table */
		pgd_t new_pgd = __pgd(__pa(pud) | pgprot_val(pgtable_prot));

		set_pgd(pgd + pgd_index(restore_jump_address), new_pgd);
	}

	return 0;
}
static void *alloc_pgt_page(void *context)
{
	return (void *)get_safe_page(GFP_ATOMIC);
}
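/*
 * Build the temporary page tables used while the image is restored: an
 * identity mapping of all directly mapped memory plus the single kernel
 * text mapping created by set_up_temporary_text_mapping().
 */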
static int set_up_temporary_mappings(void)
{
	struct x86_mapping_info info = {
		.alloc_pgt_page	= alloc_pgt_page,
		.page_flag	= __PAGE_KERNEL_LARGE_EXEC,
		.offset		= __PAGE_OFFSET,
	};
	unsigned long mstart, mend;
	pgd_t *pgd;
	int result;
	int i;

	pgd = (pgd_t *)get_safe_page(GFP_ATOMIC);
	if (!pgd)
		return -ENOMEM;

	/* Prepare a temporary mapping for the kernel text */
	result = set_up_temporary_text_mapping(pgd);
	if (result)
		return result;

	/* Set up the direct mapping from scratch */
	for (i = 0; i < nr_pfn_mapped; i++) {
		mstart = pfn_mapped[i].start << PAGE_SHIFT;
		mend   = pfn_mapped[i].end << PAGE_SHIFT;

		result = kernel_ident_mapping_init(&info, pgd, mstart, mend);
		if (result)
			return result;
	}

	temp_level4_pgt = __pa(pgd);

	return 0;
}
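/*
 * Copy the assembly restore routine (core_restore_code) into a freshly
 * allocated safe page that cannot collide with any image data, and clear
 * the NX bit at whichever paging level maps that page so the copy can be
 * executed.
 */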
static int relocate_restore_code(void)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	relocated_restore_code = get_safe_page(GFP_ATOMIC);
	if (!relocated_restore_code)
		return -ENOMEM;

	memcpy((void *)relocated_restore_code, core_restore_code, PAGE_SIZE);

	/* Make the page containing the relocated code executable */
	pgd = (pgd_t *)__va(read_cr3_pa()) +
		pgd_index(relocated_restore_code);
	p4d = p4d_offset(pgd, relocated_restore_code);
	if (p4d_large(*p4d)) {
		set_p4d(p4d, __p4d(p4d_val(*p4d) & ~_PAGE_NX));
		goto out;
	}
	pud = pud_offset(p4d, relocated_restore_code);
	if (pud_large(*pud)) {
		set_pud(pud, __pud(pud_val(*pud) & ~_PAGE_NX));
		goto out;
	}
	pmd = pmd_offset(pud, relocated_restore_code);
	if (pmd_large(*pmd)) {
		set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_NX));
		goto out;
	}
	pte = pte_offset_kernel(pmd, relocated_restore_code);
	set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_NX));
out:
	__flush_tlb_all();
	return 0;
}
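/*
 * Arch entry point for resuming from a hibernation image: set up the
 * temporary mappings, relocate the restore code and hand control to the
 * assembly routine that copies the image pages back into place.
 */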
asmlinkage int swsusp_arch_resume(void)
{
	int error;

	/* We have got enough memory and from now on we cannot recover */
	error = set_up_temporary_mappings();
	if (error)
		return error;

	error = relocate_restore_code();
	if (error)
		return error;

	restore_image();

	return error;
}
/*
 *	pfn_is_nosave - check if given pfn is in the 'nosave' section
 */
int pfn_is_nosave(unsigned long pfn)
{
	unsigned long nosave_begin_pfn = __pa_symbol(&__nosave_begin) >> PAGE_SHIFT;
	unsigned long nosave_end_pfn = PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT;

	return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
}
#define MD5_DIGEST_SIZE 16
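/*
 * Arch-specific data stored in the hibernation image header; its layout must
 * match between the kernel that created the image and the one restoring it.
 */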
struct restore_data_record {
	unsigned long jump_address;
	unsigned long jump_address_phys;
	unsigned long cr3;
	unsigned long magic;
	u8 e820_digest[MD5_DIGEST_SIZE];
};

#define RESTORE_MAGIC	0x23456789ABCDEF01UL
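/*
 * A mismatching magic makes arch_hibernation_header_restore() reject the
 * image, which guards against resuming with a restore_data_record layout
 * different from the one the image was written with.
 */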
#if IS_BUILTIN(CONFIG_CRYPTO_MD5)
/**
 * get_e820_md5 - calculate md5 according to given e820 table
 *
 * @table: the e820 table to be calculated
 * @buf: the md5 result to be stored to
 */
static int get_e820_md5(struct e820_table *table, void *buf)
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int size;
	int ret = 0;

	tfm = crypto_alloc_shash("md5", 0, 0);
	if (IS_ERR(tfm))
		return -ENOMEM;

	desc = kmalloc(sizeof(struct shash_desc) + crypto_shash_descsize(tfm),
		       GFP_KERNEL);
	if (!desc) {
		ret = -ENOMEM;
		goto free_tfm;
	}

	desc->tfm = tfm;

	size = offsetof(struct e820_table, entries) +
		sizeof(struct e820_entry) * table->nr_entries;

	if (crypto_shash_digest(desc, (u8 *)table, size, buf))
		ret = -EINVAL;

	kzfree(desc);

free_tfm:
	crypto_free_shash(tfm);

	return ret;
}
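/*
 * An MD5 digest of the firmware-provided e820 table is stored in the image
 * header, so that the restoring kernel can detect that the machine's memory
 * map has changed since the image was created and refuse to resume.
 */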
static void hibernation_e820_save(void *buf)
{
	get_e820_md5(e820_table_firmware, buf);
}
static bool hibernation_e820_mismatch(void *buf)
{
	int ret;
	u8 result[MD5_DIGEST_SIZE];

	memset(result, 0, MD5_DIGEST_SIZE);
	/* If there is no digest in suspend kernel, let it go. */
	if (!memcmp(result, buf, MD5_DIGEST_SIZE))
		return false;

	ret = get_e820_md5(e820_table_firmware, result);
	if (ret)
		return true;

	return memcmp(result, buf, MD5_DIGEST_SIZE) ? true : false;
}
#else
static void hibernation_e820_save(void *buf)
{
}

static bool hibernation_e820_mismatch(void *buf)
{
	/* If md5 is not builtin for restore kernel, let it go. */
	return false;
}
#endif
/**
 *	arch_hibernation_header_save - populate the architecture specific part
 *		of a hibernation image header
 *	@addr: address to save the data at
 */
int arch_hibernation_header_save(void *addr, unsigned int max_size)
{
	struct restore_data_record *rdr = addr;

	if (max_size < sizeof(struct restore_data_record))
		return -EOVERFLOW;
	rdr->jump_address = (unsigned long)restore_registers;
	rdr->jump_address_phys = __pa_symbol(restore_registers);
	/*
	 * The restore code fixes up CR3 and CR4 in the following sequence:
	 *
	 * [in hibernation asm]
	 * 1. CR3 <= temporary page tables
	 * 2. CR4 <= mmu_cr4_features (from the kernel that restores us)
	 * 3. CR3 <= rdr->cr3
	 * 4. CR4 <= mmu_cr4_features (from us, i.e. the image kernel)
	 * [in restore_processor_state()]
	 * 5. CR4 <= saved CR4
	 * 6. CR3 <= saved CR3
	 *
	 * Our mmu_cr4_features has CR4.PCIDE=0, and toggling
	 * CR4.PCIDE while CR3's PCID bits are nonzero is illegal, so
	 * rdr->cr3 needs to point to valid page tables but must not
	 * have any of the PCID bits set.
	 */
	rdr->cr3 = restore_cr3 & ~CR3_PCID_MASK;

	rdr->magic = RESTORE_MAGIC;

	hibernation_e820_save(rdr->e820_digest);

	return 0;
}
/**
 *	arch_hibernation_header_restore - read the architecture specific data
 *		from the hibernation image header
 *	@addr: address to read the data from
 */
int arch_hibernation_header_restore(void *addr)
{
	struct restore_data_record *rdr = addr;

	restore_jump_address = rdr->jump_address;
	jump_address_phys = rdr->jump_address_phys;
	restore_cr3 = rdr->cr3;

	if (rdr->magic != RESTORE_MAGIC) {
		pr_crit("Unrecognized hibernate image header format!\n");
		return -EINVAL;
	}

	if (hibernation_e820_mismatch(rdr->e820_digest)) {
		pr_crit("Hibernate inconsistent memory map detected!\n");
		return -ENODEV;
	}

	return 0;
}
int arch_resume_nosmt(void)
{
	int ret = 0;
	/*
	 * We reached this while coming out of hibernation. This means
	 * that SMT siblings are sleeping in hlt, as mwait is not safe
	 * against control transition during resume (see comment in
	 * hibernate_resume_nonboot_cpu_disable()).
	 *
	 * If the resumed kernel has SMT disabled, we have to take all the
	 * SMT siblings out of hlt, and offline them again so that they
	 * end up in mwait proper.
	 *
	 * Called with hotplug disabled.
	 */
	cpu_hotplug_enable();
	if (cpu_smt_control == CPU_SMT_DISABLED ||
			cpu_smt_control == CPU_SMT_FORCE_DISABLED) {
		enum cpuhp_smt_control old = cpu_smt_control;

		ret = cpuhp_smt_enable();
		if (ret)
			goto out;

		ret = cpuhp_smt_disable(old);
		if (ret)
			goto out;
	}
out:
	cpu_hotplug_disable();
	return ret;
}