/*
 * Hibernation support for x86-64
 *
 * Distribute under GPLv2.
 *
 * Copyright 2007 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright 2005 Andi Kleen <ak@suse.de>
 * Copyright 2004 Pavel Machek <pavel@suse.cz>
 *
 * swsusp_arch_resume must not use any stack or any nonlocal variables while
 * copying pages:
 *
 * It's rewriting one kernel image with another. What is a stack page in the
 * "old" image could very well be a data page in the "new" image, and
 * overwriting your own stack out from under yourself is a bad idea.
 */

	.text
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>
#include <asm/processor-flags.h>
ENTRY(swsusp_arch_suspend)
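	/*
	 * Save the general-purpose registers and the flags into
	 * saved_context; the pt_regs_* offsets are generated by
	 * asm-offsets, matching the pt_regs area at the start of
	 * struct saved_context.
	 */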
	movq	$saved_context, %rax
	movq	%rsp, pt_regs_sp(%rax)
	movq	%rbp, pt_regs_bp(%rax)
	movq	%rsi, pt_regs_si(%rax)
	movq	%rdi, pt_regs_di(%rax)
	movq	%rbx, pt_regs_bx(%rax)
	movq	%rcx, pt_regs_cx(%rax)
	movq	%rdx, pt_regs_dx(%rax)
	movq	%r8, pt_regs_r8(%rax)
	movq	%r9, pt_regs_r9(%rax)
	movq	%r10, pt_regs_r10(%rax)
	movq	%r11, pt_regs_r11(%rax)
	movq	%r12, pt_regs_r12(%rax)
	movq	%r13, pt_regs_r13(%rax)
	movq	%r14, pt_regs_r14(%rax)
	movq	%r15, pt_regs_r15(%rax)
	pushfq
	popq	pt_regs_flags(%rax)
	/* save the address of restore_registers */
	movq	$restore_registers, %rax
	movq	%rax, restore_jump_address(%rip)
	/* save cr3 */
	movq	%cr3, %rax
	movq	%rax, restore_cr3(%rip)

	call	swsusp_save
	ret
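/*
 * The boot kernel's swsusp_arch_resume() is expected to jump here after
 * it has loaded the image, built temp_level4_pgt, and copied
 * core_restore_code to a spare page; restore_jump_address and
 * restore_cr3 were stored above and travel in the image header.
 */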
ENTRY(restore_image)
	/* switch to temporary page tables */
	movq	$__PAGE_OFFSET, %rdx
	movq	temp_level4_pgt(%rip), %rax
	subq	%rdx, %rax	# kernel virtual address -> physical, for CR3
	movq	%rax, %cr3
	/* Flush TLB */
	movq	mmu_cr4_features(%rip), %rax
	movq	%rax, %rdx
	andq	$~(X86_CR4_PGE), %rdx
	movq	%rdx, %cr4;  # turn off PGE
	movq	%cr3, %rcx;  # flush TLB
	movq	%rcx, %cr3;
	movq	%rax, %cr4;  # turn PGE back on
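	/*
	 * Clearing CR4.PGE around the CR3 reload above invalidates the
	 * global TLB entries as well; a plain CR3 write would leave
	 * them in place.
	 */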
	/* prepare to jump to the image kernel */
	movq	restore_jump_address(%rip), %rax
	movq	restore_cr3(%rip), %rbx

	/* prepare to copy image data to their original locations */
	movq	restore_pblist(%rip), %rdx
	movq	relocated_restore_code(%rip), %rcx
	jmpq	*%rcx
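/*
 * Jumping through relocated_restore_code matters: the copy loop below
 * may overwrite its own original text while putting image pages back.
 */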
	/* code below has been relocated to a safe page */
ENTRY(core_restore_code)
loop:
	testq	%rdx, %rdx
	jz	done

	/* get addresses from the pbe and copy the page */
	movq	pbe_address(%rdx), %rsi
	movq	pbe_orig_address(%rdx), %rdi
	movq	$(PAGE_SIZE >> 3), %rcx
	rep
	movsq

	/* progress to the next pbe */
	movq	pbe_next(%rdx), %rdx
	jmp	loop
done:
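	/*
	 * Each pbe (page backup entry) pairs a page's temporary address
	 * with its original one; $(PAGE_SIZE >> 3) counts the 8-byte
	 * quadwords that rep/movsq moves per page, and a NULL pbe_next
	 * ends the list.
	 */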
	/* jump to the restore_registers address from the image header */
	jmpq	*%rax
	/*
	 * NOTE: This assumes that the boot kernel's text mapping covers the
	 * image kernel's page containing restore_registers and the address of
	 * this page is the same as in the image kernel's text mapping (it
	 * should always be true, because the text mapping is linear, starting
	 * from 0, and is supposed to cover the entire kernel text for every
	 * kernel).
	 *
	 * code below belongs to the image kernel
	 */
ENTRY(restore_registers)
	/* go back to the original page tables */
	movq	%rbx, %cr3
	/* Flush TLB, including "global" things (vmalloc) */
	movq	mmu_cr4_features(%rip), %rax
	movq	%rax, %rdx
	andq	$~(X86_CR4_PGE), %rdx
	movq	%rdx, %cr4;  # turn off PGE
	movq	%cr3, %rcx;  # flush TLB
	movq	%rcx, %cr3
	movq	%rax, %cr4;  # turn PGE back on
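	/*
	 * Same CR4.PGE sequence as in restore_image, now against the
	 * image kernel's own page tables, so no "global" mappings from
	 * the boot kernel survive.
	 */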
	/* We don't restore %rax, it must be 0 anyway */
	movq	$saved_context, %rax
	movq	pt_regs_sp(%rax), %rsp
	movq	pt_regs_bp(%rax), %rbp
	movq	pt_regs_si(%rax), %rsi
	movq	pt_regs_di(%rax), %rdi
	movq	pt_regs_bx(%rax), %rbx
	movq	pt_regs_cx(%rax), %rcx
	movq	pt_regs_dx(%rax), %rdx
	movq	pt_regs_r8(%rax), %r8
	movq	pt_regs_r9(%rax), %r9
	movq	pt_regs_r10(%rax), %r10
	movq	pt_regs_r11(%rax), %r11
	movq	pt_regs_r12(%rax), %r12
	movq	pt_regs_r13(%rax), %r13
	movq	pt_regs_r14(%rax), %r14
	movq	pt_regs_r15(%rax), %r15
	pushq	pt_regs_flags(%rax)
	popfq

	xorq	%rax, %rax
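	/*
	 * %rax stays zero: the value written to in_suspend below is how
	 * the hibernation core sees that memory has been restored.
	 */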
	/* tell the hibernation core that we've just restored the memory */
	movq	%rax, in_suspend(%rip)

	ret