/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/threads.h>
#include <linux/linkage.h>

#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/feature-fixups.h>
/*
 * Structure for storing CPU registers in the save area.
 */
#define SL_SPRG0	0x10	/* 4 SPRGs */
#define SL_R12		0xb4	/* r12 to r31 */
#define SL_SIZE		(SL_R12 + 80)
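
/*
 * A quick check on the arithmetic above: SL_R12 holds the 20
 * callee-saved GPRs r12..r31 at 4 bytes each (80 bytes), so
 * SL_SIZE = 0xb4 + 80 = 0x104 bytes.
 */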
_GLOBAL(swsusp_save_area)

_GLOBAL(swsusp_arch_suspend)
	lis	r11,swsusp_save_area@h
	ori	r11,r11,swsusp_save_area@l

	/* Get a stable timebase and save it */
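	/* A minimal sketch of the classic stable-timebase read on 32-bit
	 * PowerPC: re-read TBU until it is unchanged, so the TBU/TBL pair
	 * is known to be consistent:
	 * 1:	mftbu	r4
	 *	mftb	r5
	 *	mftbu	r3
	 *	cmpw	r3,r4
	 *	bne	1b
	 */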
	stw	r4,SL_SPRG0+4(r11)
	stw	r4,SL_SPRG0+8(r11)
	stw	r4,SL_SPRG0+12(r11)
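	/* Each store above pairs with an mfsprg read; a sketch of the
	 * pattern, assuming r11 still holds the save area base:
	 *	mfsprg	r4,1
	 *	stw	r4,SL_SPRG0+4(r11)
	 */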
	stw	r4,SL_DBAT0+4(r11)
	stw	r4,SL_DBAT1+4(r11)
	stw	r4,SL_DBAT2+4(r11)
	stw	r4,SL_DBAT3+4(r11)
	stw	r4,SL_IBAT0+4(r11)
	stw	r4,SL_IBAT1+4(r11)
	stw	r4,SL_IBAT2+4(r11)
	stw	r4,SL_IBAT3+4(r11)
BEGIN_MMU_FTR_SECTION
	stw	r4,SL_DBAT4+4(r11)
	stw	r4,SL_DBAT5+4(r11)
	stw	r4,SL_DBAT6+4(r11)
	stw	r4,SL_DBAT7+4(r11)
	stw	r4,SL_IBAT4+4(r11)
	stw	r4,SL_IBAT5+4(r11)
	stw	r4,SL_IBAT6+4(r11)
	stw	r4,SL_IBAT7+4(r11)
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
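	/* The BEGIN_MMU_FTR_SECTION/END_MMU_FTR_SECTION_IFSET pair marks
	 * the high-BAT saves as feature-dependent: boot-time feature
	 * fixups patch them out to nops on CPUs that don't report
	 * MMU_FTR_USE_HIGH_BATS. */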
	/* Back up various CPU configuration state */

	/* Call the low-level suspend code (we should probably have made
	 * a stackframe...)
	 */
	/* Restore LR from the save area */
	lis	r11,swsusp_save_area@h
	ori	r11,r11,swsusp_save_area@l
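	/* A sketch of the reload and return, assuming a hypothetical
	 * SL_LR slot (not among the offsets shown above) holding the
	 * saved link register:
	 *	lwz	r0,SL_LR(r11)
	 *	mtlr	r0
	 *	blr
	 */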
_GLOBAL(swsusp_arch_resume)

#ifdef CONFIG_ALTIVEC
	/* Stop pending AltiVec streams and memory accesses */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	/* Disable MSR:DR to make sure we don't take a TLB or
	 * hash miss during the copy, as our hash table will
	 * be unusable for a while. For .text, we assume we are
	 * covered by a BAT. This works only for non-G5 at this
	 * point. G5 will need a better approach, possibly using
	 * a small temporary hash table filled with large mappings;
	 * disabling the MMU completely isn't a good option for
	 * performance reasons.
	 * (Note that 750s may have the same performance issue as
	 * the G5 in this case; we should investigate using moving
	 * BATs for these CPUs.)
	 */
	rlwinm	r0,r0,0,28,26		/* clear MSR_DR */
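	/* A sketch of the full sequence around this mask: read the MSR,
	 * clear bit 27 (MSR_DR), write it back and synchronize:
	 *	mfmsr	r0
	 *	rlwinm	r0,r0,0,28,26
	 *	mtmsr	r0
	 *	sync
	 *	isync
	 */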
	/* Load the pointer to the list of pages to copy in r3 */
	lis	r11,(restore_pblist - KERNELBASE)@h
	ori	r11,r11,(restore_pblist - KERNELBASE)@l
	/* Copy the pages. This is a very basic implementation, to
	 * be replaced by something more cache-efficient */
	lwz	r11,pbe_address(r3)		/* source */
	lwz	r10,pbe_orig_address(r3)	/* destination */
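	/* A minimal sketch of a 4KB page copy body under the register
	 * assignments above (physical source in r11, destination in r10),
	 * one word per iteration:
	 *	li	r0,1024		# 4096 bytes / 4 per lwz+stw
	 *	mtctr	r0
	 * 2:	lwz	r8,0(r11)
	 *	stw	r8,0(r10)
	 *	addi	r11,r11,4
	 *	addi	r10,r10,4
	 *	bdnz	2b
	 */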
	/* Do a very simple cache flush/invalidate of the L1 to ensure
	 * coherency of the icache
	 */

	/* Now flush those cache lines */
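	/* A sketch of one such flush pass, assuming 32-byte cache lines
	 * and a walk over the first 4MB of memory (0x20000 lines):
	 *	lis	r3,0x0002
	 *	mtctr	r3
	 *	li	r3,0
	 * 1:	dcbf	0,r3
	 *	addi	r3,r3,0x20
	 *	bdnz	1b
	 *	sync
	 */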
	/* Ok, we are now running with the kernel data of the old
	 * kernel fully restored. We can get to the save area
	 * easily now. As for the rest of the code, it assumes the
	 * loader kernel and the booted one are exactly identical.
	 */
	lis	r11,swsusp_save_area@h
	ori	r11,r11,swsusp_save_area@l

	/* Restore various CPU configuration state */
	bl	__restore_cpu_setup
	/* Restore the BATs and SDR1, then we can turn on the MMU.
	 * This is a bit hairy, as we are currently executing through
	 * those very BATs, but our code is probably in the icache and
	 * we are writing the same values back to them, so this should
	 * be fine, though a better solution will have to be found
	 * long-term.
	 */
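	/* A sketch of the write-back pattern for one BAT pair, e.g.
	 * DBAT0 upper/lower:
	 *	lwz	r4,SL_DBAT0(r11)
	 *	mtspr	SPRN_DBAT0U,r4
	 *	lwz	r4,SL_DBAT0+4(r11)
	 *	mtspr	SPRN_DBAT0L,r4
	 */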
	lwz	r4,SL_SPRG0+4(r11)
	lwz	r4,SL_SPRG0+8(r11)
	lwz	r4,SL_SPRG0+12(r11)
	lwz	r4,SL_DBAT0+4(r11)
	lwz	r4,SL_DBAT1+4(r11)
	lwz	r4,SL_DBAT2+4(r11)
	lwz	r4,SL_DBAT3+4(r11)
	lwz	r4,SL_IBAT0+4(r11)
	lwz	r4,SL_IBAT1+4(r11)
	lwz	r4,SL_IBAT2+4(r11)
	lwz	r4,SL_IBAT3+4(r11)
BEGIN_MMU_FTR_SECTION
	lwz	r4,SL_DBAT4+4(r11)
	lwz	r4,SL_DBAT5+4(r11)
	lwz	r4,SL_DBAT6+4(r11)
	lwz	r4,SL_DBAT7+4(r11)
	lwz	r4,SL_IBAT4+4(r11)
	lwz	r4,SL_IBAT5+4(r11)
	lwz	r4,SL_IBAT6+4(r11)
	lwz	r4,SL_IBAT7+4(r11)
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
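	/* The loop below flushes the TLB: r4 steps down one 4KB page per
	 * iteration, and each effective address is invalidated with
	 * tlbie until the count reaches zero */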
1:	addic.	r4,r4,-0x1000

	/* Restore the MSR and turn on the MMU */

	/* Kick decrementer */
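	/* A sketch of the kick, assuming the usual minimal reload so the
	 * decrementer fires soon after interrupts come back on:
	 *	li	r0,1
	 *	mtdec	r0
	 */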
	/* Restore the callee-saved registers and return */

	/* XXX Note: we don't really need to call swsusp_resume */

_ASM_NOKPROBE_SYMBOL(swsusp_arch_resume)
/* FIXME: This construct is actually not useful since we don't shut
 * down the instruction MMU; we could just flip MSR:DR back on.
 */
SYM_FUNC_START_LOCAL(turn_on_mmu)
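	/* A minimal sketch of the classic rfi-based MMU turn-on, assuming
	 * LR holds the return address and r3 the target MSR:
	 *	mflr	r4
	 *	mtsrr0	r4
	 *	mtsrr1	r3
	 *	sync
	 *	isync
	 *	rfi
	 */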
_ASM_NOKPROBE_SYMBOL(turn_on_mmu)
SYM_FUNC_END(turn_on_mmu)