include/asm-x86/kexec_32.h
#ifndef _I386_KEXEC_H
#define _I386_KEXEC_H

#define PA_CONTROL_PAGE  0
#define VA_CONTROL_PAGE  1
#define PA_PGD           2
#define VA_PGD           3
#define PA_PTE_0         4
#define VA_PTE_0         5
#define PA_PTE_1         6
#define VA_PTE_1         7
#ifdef CONFIG_X86_PAE
#define PA_PMD_0         8
#define VA_PMD_0         9
#define PA_PMD_1         10
#define VA_PMD_1         11
#define PAGES_NR         12
#else
#define PAGES_NR         8
#endif
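
/*
 * The PA_* and VA_* values above index the page_list array that the
 * kexec code hands to relocate_kernel(): for every page needed while
 * the final relocation code runs (control page, page tables), both its
 * physical (PA_*) and virtual (VA_*) address get a slot.  A minimal
 * sketch of the intended use (illustrative; the real machine_kexec()
 * differs in detail):
 *
 *	unsigned long page_list[PAGES_NR];
 *
 *	page_list[PA_CONTROL_PAGE] = __pa(control_page);
 *	page_list[VA_CONTROL_PAGE] = (unsigned long)control_page;
 */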

#ifndef __ASSEMBLY__

#include <asm/ptrace.h>
#include <asm/string.h>

/*
 * KEXEC_SOURCE_MEMORY_LIMIT is the highest page that get_free_page()
 * can return, i.e. the highest page that is mapped directly into
 * kernel memory, so that kmap() is not required to access it.
 */

/* Maximum physical address we can use pages from */
#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
/* Maximum address we can reach in physical address mode */
#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
/* Maximum address we can use for the control code buffer */
#define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE
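
/*
 * Size of the buffer the relocation (control) code is copied into.
 * One 4 KiB page is assumed to be enough for relocate_kernel() and
 * its data (a sketch of the intent, assuming the usual kexec layout).
 */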
#define KEXEC_CONTROL_CODE_SIZE	4096

/* The native architecture */
#define KEXEC_ARCH KEXEC_ARCH_386

/* We can also handle crash dumps from a 64-bit kernel. */
#define vmcore_elf_check_arch_cross(x) ((x)->e_machine == EM_X86_64)

/*
 * The CPU does not save ss and esp on the stack if execution was
 * already in kernel mode at the time the NMI occurred.  This code
 * fixes that up.
 */
static inline void crash_fixup_ss_esp(struct pt_regs *newregs,
				      struct pt_regs *oldregs)
{
	memcpy(newregs, oldregs, sizeof(*newregs));
	newregs->esp = (unsigned long)&(oldregs->esp);
	__asm__ __volatile__(
			"xorl %%eax, %%eax\n\t"
			"movw %%ss, %%ax\n\t"
			:"=a"(newregs->xss));
}

/*
 * This function is responsible for capturing register states if coming
 * in via panic, or just fixing up ss and esp if coming in via a
 * kernel-mode exception.
 */
static inline void crash_setup_regs(struct pt_regs *newregs,
				    struct pt_regs *oldregs)
{
	if (oldregs)
		crash_fixup_ss_esp(newregs, oldregs);
	else {
		__asm__ __volatile__("movl %%ebx,%0" : "=m"(newregs->ebx));
		__asm__ __volatile__("movl %%ecx,%0" : "=m"(newregs->ecx));
		__asm__ __volatile__("movl %%edx,%0" : "=m"(newregs->edx));
		__asm__ __volatile__("movl %%esi,%0" : "=m"(newregs->esi));
		__asm__ __volatile__("movl %%edi,%0" : "=m"(newregs->edi));
		__asm__ __volatile__("movl %%ebp,%0" : "=m"(newregs->ebp));
		__asm__ __volatile__("movl %%eax,%0" : "=m"(newregs->eax));
		__asm__ __volatile__("movl %%esp,%0" : "=m"(newregs->esp));
		__asm__ __volatile__("movw %%ss, %%ax;" :"=a"(newregs->xss));
		__asm__ __volatile__("movw %%cs, %%ax;" :"=a"(newregs->xcs));
		__asm__ __volatile__("movw %%ds, %%ax;" :"=a"(newregs->xds));
		__asm__ __volatile__("movw %%es, %%ax;" :"=a"(newregs->xes));
		__asm__ __volatile__("pushfl; popl %0" :"=m"(newregs->eflags));

		newregs->eip = (unsigned long)current_text_addr();
	}
}
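
/*
 * Typical use (a minimal sketch, assuming the crash path of this
 * kernel generation; the exact caller lives in kernel/kexec.c and may
 * differ in detail):
 *
 *	void crash_kexec(struct pt_regs *regs)
 *	{
 *		struct pt_regs fixed_regs;
 *
 *		crash_setup_regs(&fixed_regs, regs);
 *		machine_crash_shutdown(&fixed_regs);
 *		...
 *	}
 */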

asmlinkage NORET_TYPE void
relocate_kernel(unsigned long indirection_page,
		unsigned long control_page,
		unsigned long start_address,
		unsigned int has_pae) ATTRIB_NORET;
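
/*
 * relocate_kernel() above is the assembly stub that runs from the
 * control page and performs the final copy of the new kernel.  Its
 * arguments are assumed to be: the physical address of the indirection
 * page list, the physical address of the control page, the physical
 * entry point of the new kernel, and a flag saying whether PAE paging
 * is enabled.
 */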

#endif /* __ASSEMBLY__ */

#endif /* _I386_KEXEC_H */