/*
 * handle transition of Linux booting another kernel
 * Copyright (C) 2002-2005 Eric Biederman <ebiederm@xmission.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */

#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/reboot.h>
#include <linux/numa.h>
#include <linux/ftrace.h>
#include <linux/io.h>
#include <linux/suspend.h>

#include <asm/init.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/debugreg.h>
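/*
 * Free the pages backing the transition page tables (PUD/PMD/PTE)
 * hung off image->arch, if any were allocated. free_page() tolerates
 * a zero address, so this is safe on a partially built hierarchy.
 */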
static void free_transition_pgtable(struct kimage *image)
{
	free_page((unsigned long)image->arch.pud);
	free_page((unsigned long)image->arch.pmd);
	free_page((unsigned long)image->arch.pte);
}
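/*
 * Map the virtual address of relocate_kernel() to the physical copy of
 * its code in the control page, so the CPU can keep executing at the
 * same virtual address once it switches onto the identity-mapped page
 * tables. Intermediate levels are allocated on demand and remembered
 * in image->arch so they can be freed later.
 */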
static int init_transition_pgtable(struct kimage *image, pgd_t *pgd)
{
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long vaddr, paddr;
	int result = -ENOMEM;

	vaddr = (unsigned long)relocate_kernel;
	paddr = __pa(page_address(image->control_code_page)+PAGE_SIZE);
	pgd += pgd_index(vaddr);
	if (!pgd_present(*pgd)) {
		pud = (pud_t *)get_zeroed_page(GFP_KERNEL);
		if (!pud)
			goto err;
		image->arch.pud = pud;
		set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE));
	}
	pud = pud_offset(pgd, vaddr);
	if (!pud_present(*pud)) {
		pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL);
		if (!pmd)
			goto err;
		image->arch.pmd = pmd;
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
	}
	pmd = pmd_offset(pud, vaddr);
	if (!pmd_present(*pmd)) {
		pte = (pte_t *)get_zeroed_page(GFP_KERNEL);
		if (!pte)
			goto err;
		image->arch.pte = pte;
		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
	}
	pte = pte_offset_kernel(pmd, vaddr);
	set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
	return 0;
err:
	free_transition_pgtable(image);
	return result;
}
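/*
 * Allocator callback handed to kernel_ident_mapping_init(): page-table
 * pages for the identity map come from the image's control pages,
 * which are allocated so as not to collide with memory the new kernel
 * will overwrite.
 */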
static void *alloc_pgt_page(void *data)
{
	struct kimage *image = (struct kimage *)data;
	struct page *page;
	void *p = NULL;

	page = kimage_alloc_control_pages(image, 0);
	if (page) {
		p = page_address(page);
		clear_page(p);
	}

	return p;
}
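/*
 * Build the identity-mapped page tables used while relocating the new
 * kernel: a 1:1 mapping for every range of RAM the first kernel has
 * mapped, plus 1:1 mappings for the image segments themselves, and
 * finally the transition mapping for relocate_kernel().
 */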
static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
{
	struct x86_mapping_info info = {
		.alloc_pgt_page	= alloc_pgt_page,
		.context	= image,
		.pmd_flag	= __PAGE_KERNEL_LARGE_EXEC,
	};
	unsigned long mstart, mend;
	pgd_t *level4p;
	int result;
	int i;

	level4p = (pgd_t *)__va(start_pgtable);

	for (i = 0; i < nr_pfn_mapped; i++) {
		mstart = pfn_mapped[i].start << PAGE_SHIFT;
		mend   = pfn_mapped[i].end << PAGE_SHIFT;

		result = kernel_ident_mapping_init(&info,
						   level4p, mstart, mend);
		if (result)
			return result;
	}

	/*
	 * The segments' memory ranges could lie outside 0 ~ max_pfn,
	 * for example when jumping back to the original kernel from a
	 * kexeced kernel, or when the first kernel was booted with a
	 * user-supplied memory map and the second kernel is loaded
	 * outside that range.
	 */
	for (i = 0; i < image->nr_segments; i++) {
		mstart = image->segment[i].mem;
		mend   = mstart + image->segment[i].memsz;

		result = kernel_ident_mapping_init(&info,
						   level4p, mstart, mend);
		if (result)
			return result;
	}

	return init_transition_pgtable(image, level4p);
}
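/*
 * Point the IDT at caller-supplied memory. machine_kexec() uses this
 * with address 0 and limit 0 to invalidate the IDT, so no stray
 * interrupt can vector through stale handlers once the old kernel's
 * tables are gone.
 */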
static void set_idt(void *newidt, u16 limit)
{
	struct desc_ptr curidt;

	/* x86-64 supports unaligned loads & stores */
	curidt.size    = limit;
	curidt.address = (unsigned long)newidt;

	__asm__ __volatile__ (
		"lidtq %0\n"
		: : "m" (curidt)
		);
}
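/* Counterpart of set_idt() for the GDT. */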
static void set_gdt(void *newgdt, u16 limit)
{
	struct desc_ptr curgdt;

	/* x86-64 supports unaligned loads & stores */
	curgdt.size    = limit;
	curgdt.address = (unsigned long)newgdt;

	__asm__ __volatile__ (
		"lgdtq %0\n"
		: : "m" (curgdt)
		);
}
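/*
 * Force every data segment register to __KERNEL_DS so the hidden
 * descriptor caches hold valid contents before the GDT itself is
 * zapped below.
 */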
static void load_segments(void)
{
	__asm__ __volatile__ (
		"\tmovl %0,%%ds\n"
		"\tmovl %0,%%es\n"
		"\tmovl %0,%%ss\n"
		"\tmovl %0,%%fs\n"
		"\tmovl %0,%%gs\n"
		: : "a" (__KERNEL_DS) : "memory"
		);
}
int machine_kexec_prepare(struct kimage *image)
{
	unsigned long start_pgtable;
	int result;

	/* Calculate the offsets */
	start_pgtable = page_to_pfn(image->control_code_page) << PAGE_SHIFT;

	/* Set up the identity-mapped 64-bit page table */
	result = init_pgtable(image, start_pgtable);
	if (result)
		return result;

	return 0;
}
void machine_kexec_cleanup(struct kimage *image)
{
	free_transition_pgtable(image);
}
/*
 * Do not allocate memory (or fail in any way) in machine_kexec().
 * We are past the point of no return, committed to rebooting now.
 */
void machine_kexec(struct kimage *image)
{
	unsigned long page_list[PAGES_NR];
	void *control_page;
	int save_ftrace_enabled;

#ifdef CONFIG_KEXEC_JUMP
	if (image->preserve_context)
		save_processor_state();
#endif

	save_ftrace_enabled = __ftrace_enabled_save();

	/* Interrupts aren't acceptable while we reboot */
	local_irq_disable();
	hw_breakpoint_disable();

	if (image->preserve_context) {
#ifdef CONFIG_X86_IO_APIC
		/*
		 * We need to put APICs in legacy mode so that we can
		 * get timer interrupts in the second kernel. The
		 * kexec/kdump paths already have calls to
		 * disable_IO_APIC() in one form or another; the kexec
		 * jump path needs one as well.
		 */
		disable_IO_APIC();
#endif
	}

	control_page = page_address(image->control_code_page) + PAGE_SIZE;
	memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);

	page_list[PA_CONTROL_PAGE] = virt_to_phys(control_page);
	page_list[VA_CONTROL_PAGE] = (unsigned long)control_page;
	page_list[PA_TABLE_PAGE] =
	  (unsigned long)__pa(page_address(image->control_code_page));

	if (image->type == KEXEC_TYPE_DEFAULT)
		page_list[PA_SWAP_PAGE] = (page_to_pfn(image->swap_page)
						<< PAGE_SHIFT);

	/*
	 * The segment registers are funny things, they have both a
	 * visible and an invisible part. Whenever the visible part is
	 * set to a specific selector, the invisible part is loaded
	 * from a table in memory. At no other time is the
	 * descriptor table in memory accessed.
	 *
	 * I take advantage of this here by force loading the
	 * segments, before I zap the gdt with an invalid value.
	 */
	load_segments();
	/*
	 * The gdt & idt are now invalid.
	 * If you want to load them you must set up your own idt & gdt.
	 */
	set_gdt(phys_to_virt(0), 0);
	set_idt(phys_to_virt(0), 0);

	/* now call it */
	image->start = relocate_kernel((unsigned long)image->head,
				       (unsigned long)page_list,
				       image->start,
				       image->preserve_context);

#ifdef CONFIG_KEXEC_JUMP
	if (image->preserve_context)
		restore_processor_state();
#endif

	__ftrace_enabled_restore(save_ftrace_enabled);
}
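/*
 * Export the symbols that dump-analysis tools (e.g. makedumpfile)
 * need in order to interpret a crash dump on x86-64.
 */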
void arch_crash_save_vmcoreinfo(void)
{
	VMCOREINFO_SYMBOL(phys_base);
	VMCOREINFO_SYMBOL(init_level4_pgt);

#ifdef CONFIG_NUMA
	VMCOREINFO_SYMBOL(node_data);
	VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
#endif
}