/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/mmu.h>
/*
 * Structure for storing CPU registers in the save area.
 */
#define SL_SP		0
#define SL_PC		4
#define SL_MSR		8
#define SL_SDR1		0xc
#define SL_SPRG0	0x10	/* 4 SPRGs */
#define SL_DBAT0	0x20
#define SL_IBAT0	0x28
#define SL_DBAT1	0x30
#define SL_IBAT1	0x38
#define SL_DBAT2	0x40
#define SL_IBAT2	0x48
#define SL_DBAT3	0x50
#define SL_IBAT3	0x58
#define SL_TB		0x60
#define SL_R2		0x68
#define SL_CR		0x6c
#define SL_LR		0x70
#define SL_R12		0x74	/* r12 to r31 */
#define SL_SIZE		(SL_R12 + 80)
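/*
 * Size check: r12..r31 is 20 registers at 4 bytes each, i.e. 80 bytes,
 * so SL_SIZE works out to 0x74 + 80 = 0xc4 bytes for the whole area.
 */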
	.section .data
	.align	5

_GLOBAL(swsusp_save_area)
	.space	SL_SIZE


	.section .text
	.align	5

_GLOBAL(swsusp_arch_suspend)
	lis	r11,swsusp_save_area@h
	ori	r11,r11,swsusp_save_area@l
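	/*
	 * The suspend path first stashes the non-volatile state in the
	 * save area. A minimal sketch of those stores, assuming the SL_*
	 * layout above (stmw writes r12..r31 in a single store-multiple):
	 */
	mflr	r0
	stw	r0,SL_LR(r11)
	mfcr	r0
	stw	r0,SL_CR(r11)
	stw	r1,SL_SP(r11)
	stw	r2,SL_R2(r11)
	stmw	r12,SL_R12(r11)

	/* Save MSR & SDR1 so resume can rebuild the translation state */
	mfmsr	r4
	stw	r4,SL_MSR(r11)
	mfsdr1	r4
	stw	r4,SL_SDR1(r11)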
	/* Get a stable timebase and save it */
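	/*
	 * A sketch of the usual read-twice idiom, assuming SL_TB holds the
	 * 64-bit timebase as two words: re-read TBU after TBL and retry if
	 * the upper half ticked over in between.
	 */
1:	mftbu	r4
	stw	r4,SL_TB(r11)
	mftb	r5
	stw	r5,SL_TB+4(r11)
	mftbu	r3
	cmpw	r3,r4
	bne	1b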
	/* Save SPRGs */
	mfsprg	r4,0
	stw	r4,SL_SPRG0(r11)
	mfsprg	r4,1
	stw	r4,SL_SPRG0+4(r11)
	mfsprg	r4,2
	stw	r4,SL_SPRG0+8(r11)
	mfsprg	r4,3
	stw	r4,SL_SPRG0+12(r11)
	/* Save BATs: upper and lower half of each DBAT/IBAT pair */
	mfdbatu	r4,0
	stw	r4,SL_DBAT0(r11)
	mfdbatl	r4,0
	stw	r4,SL_DBAT0+4(r11)
	mfdbatu	r4,1
	stw	r4,SL_DBAT1(r11)
	mfdbatl	r4,1
	stw	r4,SL_DBAT1+4(r11)
	mfdbatu	r4,2
	stw	r4,SL_DBAT2(r11)
	mfdbatl	r4,2
	stw	r4,SL_DBAT2+4(r11)
	mfdbatu	r4,3
	stw	r4,SL_DBAT3(r11)
	mfdbatl	r4,3
	stw	r4,SL_DBAT3+4(r11)
	mfibatu	r4,0
	stw	r4,SL_IBAT0(r11)
	mfibatl	r4,0
	stw	r4,SL_IBAT0+4(r11)
	mfibatu	r4,1
	stw	r4,SL_IBAT1(r11)
	mfibatl	r4,1
	stw	r4,SL_IBAT1+4(r11)
	mfibatu	r4,2
	stw	r4,SL_IBAT2(r11)
	mfibatl	r4,2
	stw	r4,SL_IBAT2+4(r11)
	mfibatu	r4,3
	stw	r4,SL_IBAT3(r11)
	mfibatl	r4,3
	stw	r4,SL_IBAT3+4(r11)
	/* Back up various CPU config stuff */
	bl	__save_cpu_setup
	/* Call the low level suspend stuff (we should probably have made
	 * a stackframe...)
	 */
	bl	swsusp_save
	/* Restore LR from the save area */
	lis	r11,swsusp_save_area@h
	ori	r11,r11,swsusp_save_area@l
	lwz	r0,SL_LR(r11)
	mtlr	r0

	blr
_GLOBAL(swsusp_arch_resume)

#ifdef CONFIG_ALTIVEC
	/* Stop pending AltiVec streams and memory accesses */
BEGIN_FTR_SECTION
	DSSALL
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	sync
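	/*
	 * dssall stops any outstanding data-stream prefetches; issuing it
	 * before translation is disabled below keeps a stream from touching
	 * pages whose mappings are about to become stale.
	 */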
	/* Disable MSR:DR to make sure we don't take a TLB or
	 * hash miss during the copy, as our hash table will
	 * for a while be unusable. For .text, we assume we are
	 * covered by a BAT. This works only for non-G5 at this
	 * point. G5 will need a better approach, possibly using
	 * a small temporary hash table filled with large mappings;
	 * disabling the MMU completely isn't a good option for
	 * performance reasons.
	 * (Note that 750's may have the same performance issue as
	 * the G5 in this case; we should investigate using moving
	 * BATs for these CPUs)
	 */
	mfmsr	r0
	sync
	rlwinm	r0,r0,0,28,26		/* clear MSR_DR */
	mtmsr	r0
	sync
	isync
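	/*
	 * The rlwinm mask (MB=28, ME=26) wraps around, keeping every bit
	 * except bit 27; bit 27 is 1 << 4 = 0x10, which is MSR_DR.
	 */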
	/* Load a pointer to the list of pages to copy in r3 */
	lis	r11,(restore_pblist - KERNELBASE)@h
	ori	r11,r11,restore_pblist@l
	lwz	r10,0(r11)
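	/*
	 * With MSR:DR off we need the physical address. Subtracting
	 * KERNELBASE only changes the high halfword (KERNELBASE's low
	 * 16 bits are zero), which is why only the @h part is adjusted.
	 */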
	/* Copy the pages. This is a very basic implementation, to
	 * be replaced by something more cache efficient */
1:
	tophys(r3,r10)
	li	r0,256
	mtctr	r0
	lwz	r11,pbe_address(r3)		/* source */
	tophys(r11,r11)
	lwz	r10,pbe_orig_address(r3)	/* destination */
	tophys(r10,r10)
2:
	lwz	r8,0(r11)
	lwz	r9,4(r11)
	lwz	r6,8(r11)
	lwz	r7,12(r11)
	addi	r11,r11,16
	stw	r8,0(r10)
	stw	r9,4(r10)
	stw	r6,8(r10)
	stw	r7,12(r10)
	addi	r10,r10,16
	bdnz	2b
	lwz	r10,pbe_next(r3)
	cmpwi	0,r10,0
	bne	1b
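	/*
	 * 256 iterations of 16 bytes is exactly one 4096-byte page per
	 * pbe entry; the list is walked until pbe_next is NULL.
	 */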
	/* Do a very simple cache flush/inval of the L1 to ensure
	 * coherency of the icache
	 */
	lis	r3,0x0002
	mtctr	r3
	li	r3,0
1:
	lwz	r0,0(r3)
	addi	r3,r3,0x0020
	bdnz	1b
	isync
	sync

	/* Now flush those cache lines */
	lis	r3,0x0002
	mtctr	r3
	li	r3,0
1:
	dcbf	0,r3
	addi	r3,r3,0x0020
	bdnz	1b
	sync
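	/*
	 * The load pass pulls lines into the data cache, displacing whatever
	 * was cached before the copy; the dcbf pass then writes back and
	 * invalidates them, so instruction fetches can't see stale code.
	 */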
	/* Ok, we are now running with the kernel data of the old
	 * kernel fully restored. We can get to the save area
	 * easily now. As for the rest of the code, it assumes the
	 * loader kernel and the booted one are exactly identical
	 */
	lis	r11,swsusp_save_area@h
	ori	r11,r11,swsusp_save_area@l
	tophys(r11,r11)
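	/* tophys() rewrites a kernel virtual address to its physical
	 * equivalent, which is what we need while MSR:DR is still off. */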
	/* Restore various CPU config stuff */
	bl	__restore_cpu_setup
	/* Restore the BATs, and SDR1. Then we can turn on the MMU.
	 * This is a bit hairy as we are running out of those BATs,
	 * but first, our code is probably in the icache, and we are
	 * writing the same value to the BAT, so that should be fine,
	 * though a better solution will have to be found long-term
	 */
	lwz	r4,SL_SDR1(r11)
	mtsdr1	r4
	lwz	r4,SL_SPRG0(r11)
	mtsprg	0,r4
	lwz	r4,SL_SPRG0+4(r11)
	mtsprg	1,r4
	lwz	r4,SL_SPRG0+8(r11)
	mtsprg	2,r4
	lwz	r4,SL_SPRG0+12(r11)
	mtsprg	3,r4

	lwz	r4,SL_DBAT0(r11)
	mtdbatu	0,r4
	lwz	r4,SL_DBAT0+4(r11)
	mtdbatl	0,r4
	lwz	r4,SL_DBAT1(r11)
	mtdbatu	1,r4
	lwz	r4,SL_DBAT1+4(r11)
	mtdbatl	1,r4
	lwz	r4,SL_DBAT2(r11)
	mtdbatu	2,r4
	lwz	r4,SL_DBAT2+4(r11)
	mtdbatl	2,r4
	lwz	r4,SL_DBAT3(r11)
	mtdbatu	3,r4
	lwz	r4,SL_DBAT3+4(r11)
	mtdbatl	3,r4
	lwz	r4,SL_IBAT0(r11)
	mtibatu	0,r4
	lwz	r4,SL_IBAT0+4(r11)
	mtibatl	0,r4
	lwz	r4,SL_IBAT1(r11)
	mtibatu	1,r4
	lwz	r4,SL_IBAT1+4(r11)
	mtibatl	1,r4
	lwz	r4,SL_IBAT2(r11)
	mtibatu	2,r4
	lwz	r4,SL_IBAT2+4(r11)
	mtibatl	2,r4
	lwz	r4,SL_IBAT3(r11)
	mtibatu	3,r4
	lwz	r4,SL_IBAT3+4(r11)
	mtibatl	3,r4
BEGIN_MMU_FTR_SECTION
	/* Zero the four extra BAT pairs on CPUs that have them */
	li	r4,0
	mtspr	SPRN_DBAT4U,r4
	mtspr	SPRN_DBAT4L,r4
	mtspr	SPRN_DBAT5U,r4
	mtspr	SPRN_DBAT5L,r4
	mtspr	SPRN_DBAT6U,r4
	mtspr	SPRN_DBAT6L,r4
	mtspr	SPRN_DBAT7U,r4
	mtspr	SPRN_DBAT7L,r4
	mtspr	SPRN_IBAT4U,r4
	mtspr	SPRN_IBAT4L,r4
	mtspr	SPRN_IBAT5U,r4
	mtspr	SPRN_IBAT5L,r4
	mtspr	SPRN_IBAT6U,r4
	mtspr	SPRN_IBAT6L,r4
	mtspr	SPRN_IBAT7U,r4
	mtspr	SPRN_IBAT7L,r4
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
	/* Flush all TLBs */
	lis	r4,0x1000
1:	addic.	r4,r4,-0x1000
	tlbie	r4
	bgt	1b
	sync
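	/*
	 * r4 starts at 0x10000000 and steps down one 4K page at a time, so
	 * a tlbie is issued for every effective-address index in that range,
	 * enough to hit every TLB set.
	 */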
	/* Restore the MSR and turn on the MMU */
	lwz	r3,SL_MSR(r11)
	bl	turn_on_mmu
	tovirt(r11,r11)

	/* Restore TB */
	li	r3,0
	mttbl	r3
	lwz	r3,SL_TB(r11)
	lwz	r4,SL_TB+4(r11)
	mttbu	r3
	mttbl	r4
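	/*
	 * TBL is zeroed first so TBU can't tick over between the two
	 * writes; the saved lower half then goes in last.
	 */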
	/* Kick decrementer */
	li	r0,1
	mtdec	r0
	/* Restore the callee-saved registers and return */
	lwz	r0,SL_CR(r11)
	mtcr	r0
	lwz	r2,SL_R2(r11)
	lmw	r12,SL_R12(r11)
	lwz	r1,SL_SP(r11)
	lwz	r0,SL_LR(r11)
	mtlr	r0
	// XXX Note: we don't really need to call swsusp_resume

	li	r3,0
	blr
/* FIXME: This construct is actually not useful since we don't shut
 * down the instruction MMU; we could just flip MSR:DR back on.
 */
turn_on_mmu:
	mflr	r4
	mtsrr0	r4
	mtsrr1	r3
	sync
	isync
	rfi
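	/*
	 * rfi loads the MSR from SRR1 and resumes at SRR0 (the caller's
	 * address, moved in from LR), so translation comes back on
	 * atomically with the return.
	 */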