arch/x86_64/kernel/suspend.c (blob 91f7e678bae72e7f5e951919633ff15d6f12451a)

/*
 * Suspend support specific for x86-64.
 *
 * Distribute under GPLv2
 *
 * Copyright (c) 2002 Pavel Machek <pavel@suse.cz>
 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
 */

#include <linux/smp.h>
#include <linux/suspend.h>
#include <asm/proto.h>
#include <asm/page.h>
#include <asm/pgtable.h>

struct saved_context saved_context;

unsigned long saved_context_eax, saved_context_ebx, saved_context_ecx, saved_context_edx;
unsigned long saved_context_esp, saved_context_ebp, saved_context_esi, saved_context_edi;
unsigned long saved_context_r08, saved_context_r09, saved_context_r10, saved_context_r11;
unsigned long saved_context_r12, saved_context_r13, saved_context_r14, saved_context_r15;
unsigned long saved_context_eflags;
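
/*
 * Note: the saved_context_* words above are not touched by the C code in
 * this file; they are filled in and reloaded by the assembly entry points
 * in arch/x86_64/kernel/suspend_asm.S, which has to stash the general
 * purpose registers and flags itself, since C code cannot coherently save
 * its own register state.
 */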

void __save_processor_state(struct saved_context *ctxt)
{
        kernel_fpu_begin();

        /*
         * descriptor tables
         */
        asm volatile ("sgdt %0" : "=m" (ctxt->gdt_limit));
        asm volatile ("sidt %0" : "=m" (ctxt->idt_limit));
        asm volatile ("str %0"  : "=m" (ctxt->tr));

        /* XMM0..XMM15 should be handled by kernel_fpu_begin(). */
        /* EFER should be constant for kernel version, no need to handle it. */

        /*
         * segment registers
         */
        asm volatile ("movw %%ds, %0" : "=m" (ctxt->ds));
        asm volatile ("movw %%es, %0" : "=m" (ctxt->es));
        asm volatile ("movw %%fs, %0" : "=m" (ctxt->fs));
        asm volatile ("movw %%gs, %0" : "=m" (ctxt->gs));
        asm volatile ("movw %%ss, %0" : "=m" (ctxt->ss));

        rdmsrl(MSR_FS_BASE, ctxt->fs_base);
        rdmsrl(MSR_GS_BASE, ctxt->gs_base);
        rdmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);

        /*
         * control registers
         */
        asm volatile ("movq %%cr0, %0" : "=r" (ctxt->cr0));
        asm volatile ("movq %%cr2, %0" : "=r" (ctxt->cr2));
        asm volatile ("movq %%cr3, %0" : "=r" (ctxt->cr3));
        asm volatile ("movq %%cr4, %0" : "=r" (ctxt->cr4));
        asm volatile ("movq %%cr8, %0" : "=r" (ctxt->cr8));
}
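
/*
 * What is deliberately *not* saved here: the general purpose registers
 * (handled in suspend_asm.S, via the saved_context_* words above), EFER
 * (constant for a given kernel, per the comment in the function), and the
 * syscall MSRs (MSR_*STAR), which are simply re-initialized on resume by
 * syscall_init() from fix_processor_context() below.
 */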

void save_processor_state(void)
{
        __save_processor_state(&saved_context);
}

static void do_fpu_end(void)
{
        /*
         * Restore FPU regs if necessary
         */
        kernel_fpu_end();
}

void __restore_processor_state(struct saved_context *ctxt)
{
        /*
         * control registers
         */
        asm volatile ("movq %0, %%cr8" :: "r" (ctxt->cr8));
        asm volatile ("movq %0, %%cr4" :: "r" (ctxt->cr4));
        asm volatile ("movq %0, %%cr3" :: "r" (ctxt->cr3));
        asm volatile ("movq %0, %%cr2" :: "r" (ctxt->cr2));
        asm volatile ("movq %0, %%cr0" :: "r" (ctxt->cr0));

        /*
         * now restore the descriptor tables to their proper values
         * ltr is done in fix_processor_context().
         */
        asm volatile ("lgdt %0" :: "m" (ctxt->gdt_limit));
        asm volatile ("lidt %0" :: "m" (ctxt->idt_limit));

        /*
         * segment registers
         */
        asm volatile ("movw %0, %%ds" :: "r" (ctxt->ds));
        asm volatile ("movw %0, %%es" :: "r" (ctxt->es));
        asm volatile ("movw %0, %%fs" :: "r" (ctxt->fs));
        load_gs_index(ctxt->gs);
        asm volatile ("movw %0, %%ss" :: "r" (ctxt->ss));

        wrmsrl(MSR_FS_BASE, ctxt->fs_base);
        wrmsrl(MSR_GS_BASE, ctxt->gs_base);
        wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);

        fix_processor_context();

        do_fpu_end();
        mtrr_ap_init();
}

void restore_processor_state(void)
{
        __restore_processor_state(&saved_context);
}
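
/*
 * Rough shape of a suspend/resume cycle as seen from this file (a sketch
 * of the call order only; the actual callers live in the generic suspend
 * code under kernel/power/ and the ACPI sleep path, not here):
 *
 *      save_processor_state();
 *      ...snapshot memory or enter the sleep state...
 *      restore_processor_state();
 */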

void fix_processor_context(void)
{
        int cpu = smp_processor_id();
        struct tss_struct *t = &per_cpu(init_tss, cpu);

        /*
         * This just modifies memory; it should not be necessary. But... it
         * is, because the hardware has the concept of a busy TSS or some
         * similar stupidity.
         */
        set_tss_desc(cpu, t);

        cpu_gdt(cpu)[GDT_ENTRY_TSS].type = 9;

        syscall_init();                         /* This sets MSR_*STAR and related */
        load_TR_desc();                         /* This does ltr */
        load_LDT(&current->active_mm->context); /* This does lldt */

        /*
         * Now maybe reload the debug registers
         */
        if (current->thread.debugreg7) {
                loaddebug(&current->thread, 0);
                loaddebug(&current->thread, 1);
                loaddebug(&current->thread, 2);
                loaddebug(&current->thread, 3);
                /* no 4 and 5 */
                loaddebug(&current->thread, 6);
                loaddebug(&current->thread, 7);
        }
}

#ifdef CONFIG_SOFTWARE_SUSPEND
/* Defined in arch/x86_64/kernel/suspend_asm.S */
extern int restore_image(void);

pgd_t *temp_level4_pgt;
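
/*
 * temp_level4_pgt is picked up by the assembly side: restore_image() in
 * suspend_asm.S loads it into %cr3 before copying the image pages, so the
 * copy runs on page tables allocated from safe pages, i.e. pages that are
 * not about to be overwritten by the image itself.
 */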

static int res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
{
        long i, j;

        i = pud_index(address);
        pud = pud + i;
        for (; i < PTRS_PER_PUD; pud++, i++) {
                unsigned long paddr;
                pmd_t *pmd;

                paddr = address + i*PUD_SIZE;
                if (paddr >= end)
                        break;

                pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
                if (!pmd)
                        return -ENOMEM;
                set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
                for (j = 0; j < PTRS_PER_PMD; pmd++, j++, paddr += PMD_SIZE) {
                        unsigned long pe;

                        if (paddr >= end)
                                break;
                        pe = _PAGE_NX | _PAGE_PSE | _KERNPG_TABLE | paddr;
                        pe &= __supported_pte_mask;
                        set_pmd(pmd, __pmd(pe));
                }
        }
        return 0;
}
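
/*
 * Sizing, for reference (x86-64 with 4KB pages and 4-level paging):
 * each PMD entry maps a 2MB large page (PMD_SIZE, hence _PAGE_PSE above),
 * PTRS_PER_PMD = 512 entries per PMD page cover 1GB = PUD_SIZE, and
 * PTRS_PER_PUD = 512 PUD entries cover 512GB = PGDIR_SIZE. So one call to
 * res_phys_pud_init() fills in at most one PGD entry's worth of mappings,
 * which matches the PGDIR_SIZE stride of the loop below.
 */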

static int set_up_temporary_mappings(void)
{
        unsigned long start, end, next;
        int error;

        temp_level4_pgt = (pgd_t *)get_safe_page(GFP_ATOMIC);
        if (!temp_level4_pgt)
                return -ENOMEM;

        /* It is safe to reuse the original kernel mapping */
        set_pgd(temp_level4_pgt + pgd_index(__START_KERNEL_map),
                init_level4_pgt[pgd_index(__START_KERNEL_map)]);

        /* Set up the direct mapping from scratch */
        start = (unsigned long)pfn_to_kaddr(0);
        end = (unsigned long)pfn_to_kaddr(end_pfn);

        for (; start < end; start = next) {
                pud_t *pud = (pud_t *)get_safe_page(GFP_ATOMIC);
                if (!pud)
                        return -ENOMEM;
                next = start + PGDIR_SIZE;
                if (next > end)
                        next = end;
                if ((error = res_phys_pud_init(pud, __pa(start), __pa(next))))
                        return error;
                set_pgd(temp_level4_pgt + pgd_index(start),
                        mk_kernel_pgd(__pa(pud)));
        }
        return 0;
}

int swsusp_arch_resume(void)
{
        int error;

        /* We have got enough memory and from now on we cannot recover */
        if ((error = set_up_temporary_mappings()))
                return error;
        restore_image();
        return 0;
}
#endif /* CONFIG_SOFTWARE_SUSPEND */