/*
 * Hibernation support for x86-64
 *
 * Distribute under GPLv2
 *
 * Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (c) 2002 Pavel Machek <pavel@ucw.cz>
 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
 */
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/suspend.h>
#include <linux/scatterlist.h>
#include <linux/kdebug.h>

#include <crypto/hash.h>

#include <asm/e820/api.h>
#include <asm/init.h>
#include <asm/proto.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/suspend.h>
#include <asm/tlbflush.h>
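
/*
 * Set up a temporary kernel text mapping in @pgd covering the page that
 * contains the image kernel's entry point, mapped at the virtual address
 * the image kernel expects (restore_jump_address).
 */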
static int set_up_temporary_text_mapping(pgd_t *pgd)
{
	pmd_t *pmd;
	pud_t *pud;
	p4d_t *p4d = NULL;
	pgprot_t pgtable_prot = __pgprot(_KERNPG_TABLE);
	pgprot_t pmd_text_prot = __pgprot(__PAGE_KERNEL_LARGE_EXEC);

	/* Filter out unsupported __PAGE_KERNEL* bits: */
	pgprot_val(pmd_text_prot) &= __default_kernel_pte_mask;
	pgprot_val(pgtable_prot)  &= __default_kernel_pte_mask;

	/*
	 * The new mapping only has to cover the page containing the image
	 * kernel's entry point (jump_address_phys), because the switch over to
	 * it is carried out by relocated code running from a page allocated
	 * specifically for this purpose and covered by the identity mapping, so
	 * the temporary kernel text mapping is only needed for the final jump.
	 * Moreover, in that mapping the virtual address of the image kernel's
	 * entry point must be the same as its virtual address in the image
	 * kernel (restore_jump_address), so the image kernel's
	 * restore_registers() code doesn't find itself in a different area of
	 * the virtual address space after switching over to the original page
	 * tables used by the image kernel.
	 */

	if (pgtable_l5_enabled()) {
		p4d = (p4d_t *)get_safe_page(GFP_ATOMIC);
		if (!p4d)
			return -ENOMEM;
	}

	pud = (pud_t *)get_safe_page(GFP_ATOMIC);
	if (!pud)
		return -ENOMEM;

	pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
	if (!pmd)
		return -ENOMEM;

	set_pmd(pmd + pmd_index(restore_jump_address),
		__pmd((jump_address_phys & PMD_MASK) | pgprot_val(pmd_text_prot)));
	set_pud(pud + pud_index(restore_jump_address),
		__pud(__pa(pmd) | pgprot_val(pgtable_prot)));
	if (p4d) {
		p4d_t new_p4d = __p4d(__pa(pud) | pgprot_val(pgtable_prot));
		pgd_t new_pgd = __pgd(__pa(p4d) | pgprot_val(pgtable_prot));

		set_p4d(p4d + p4d_index(restore_jump_address), new_p4d);
		set_pgd(pgd + pgd_index(restore_jump_address), new_pgd);
	} else {
		/* No p4d for 4-level paging: point the pgd to the pud page table */
		pgd_t new_pgd = __pgd(__pa(pud) | pgprot_val(pgtable_prot));

		set_pgd(pgd + pgd_index(restore_jump_address), new_pgd);
	}

	return 0;
}
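
/*
 * Page-table allocation callback for kernel_ident_mapping_init(): returns
 * a page that does not conflict with the hibernation image being restored.
 */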
static void *alloc_pgt_page(void *context)
{
	return (void *)get_safe_page(GFP_ATOMIC);
}
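
/*
 * Build the temporary page tables used while the image is restored: a text
 * mapping for the final jump into the image kernel plus an identity mapping
 * of all directly mapped physical memory.  The physical address of the
 * resulting top-level page table is stored in temp_pgt.
 */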
static int set_up_temporary_mappings(void)
{
	struct x86_mapping_info info = {
		.alloc_pgt_page	= alloc_pgt_page,
		.page_flag	= __PAGE_KERNEL_LARGE_EXEC,
		.offset		= __PAGE_OFFSET,
	};
	unsigned long mstart, mend;
	pgd_t *pgd;
	int result;
	int i;

	pgd = (pgd_t *)get_safe_page(GFP_ATOMIC);
	if (!pgd)
		return -ENOMEM;

	/* Prepare a temporary mapping for the kernel text */
	result = set_up_temporary_text_mapping(pgd);
	if (result)
		return result;

	/* Set up the direct mapping from scratch */
	for (i = 0; i < nr_pfn_mapped; i++) {
		mstart = pfn_mapped[i].start << PAGE_SHIFT;
		mend   = pfn_mapped[i].end << PAGE_SHIFT;

		result = kernel_ident_mapping_init(&info, pgd, mstart, mend);
		if (result)
			return result;
	}

	temp_pgt = __pa(pgd);
	return 0;
}
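
/*
 * Arch-specific entry point for resume: set up the temporary mappings and
 * relocate the code that performs the switch to the image kernel.
 */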
asmlinkage int swsusp_arch_resume(void)
{
	int error;

	/* We have got enough memory and from now on we cannot recover */
	error = set_up_temporary_mappings();
	if (error)
		return error;

	error = relocate_restore_code();