1 /* SPDX-License-Identifier: GPL-2.0 */
2 #include <linux/threads.h>
3 #include <asm/processor.h>
5 #include <asm/cputable.h>
6 #include <asm/thread_info.h>
7 #include <asm/ppc_asm.h>
8 #include <asm/asm-offsets.h>
10 #include <asm/feature-fixups.h>
13 * Structure for storing CPU registers on the save area.
19 #define SL_SPRG0 0x10 /* 4 sprg's */
40 #define SL_R12 0xb4 /* r12 to r31 */
41 #define SL_SIZE (SL_R12 + 80)
/*
 * Save area used to stash the CPU's registers across hibernation.
 * Layout is given by the SL_* offsets above; total size is SL_SIZE.
 * NOTE(review): the .space/.size directives reserving the storage are
 * not visible in this extraction — confirm against the full file.
 */
46 _GLOBAL(swsusp_save_area)
53 _GLOBAL(swsusp_arch_suspend)
/*
 * Save the CPU's special-purpose register state (SPRGs, DBATs, IBATs,
 * timebase, ...) into swsusp_save_area, then call into the low-level
 * suspend path; on the return path LR is restored from the save area.
 * NOTE(review): this chunk is a partial extraction — the mfspr/mfmsr
 * instructions that load r4 before each stw below, and the final blr,
 * are on lines not visible here.
 */
/* r11 = base address of the save area (used as the store base below). */
55 lis r11,swsusp_save_area@h
56 ori r11,r11,swsusp_save_area@l
72 /* Get a stable timebase and save it */
/* Save SPRG1..SPRG3 (offsets SL_SPRG0+4/+8/+12); the SL_SPRG0+0 store
 * is presumably on a hidden line. */
85 stw r4,SL_SPRG0+4(r11)
87 stw r4,SL_SPRG0+8(r11)
89 stw r4,SL_SPRG0+12(r11)
/* Save the data BATs 0-3 (second word of each SL_DBATn pair; the +0
 * word stores are presumably on hidden lines). */
95 stw r4,SL_DBAT0+4(r11)
99 stw r4,SL_DBAT1+4(r11)
103 stw r4,SL_DBAT2+4(r11)
107 stw r4,SL_DBAT3+4(r11)
/* Save the instruction BATs 0-3. */
111 stw r4,SL_IBAT0+4(r11)
115 stw r4,SL_IBAT1+4(r11)
119 stw r4,SL_IBAT2+4(r11)
123 stw r4,SL_IBAT3+4(r11)
125 BEGIN_MMU_FTR_SECTION
/* BATs 4-7 are saved only on CPUs advertising the high-BATs feature
 * (patched at boot by the MMU feature-fixup machinery). */
129 stw r4,SL_DBAT4+4(r11)
133 stw r4,SL_DBAT5+4(r11)
137 stw r4,SL_DBAT6+4(r11)
141 stw r4,SL_DBAT7+4(r11)
145 stw r4,SL_IBAT4+4(r11)
149 stw r4,SL_IBAT5+4(r11)
153 stw r4,SL_IBAT6+4(r11)
157 stw r4,SL_IBAT7+4(r11)
158 END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
161 /* Backup various CPU config stuffs */
164 /* Call the low level suspend stuff (we should probably have made
169 /* Restore LR from the save area */
170 lis r11,swsusp_save_area@h
171 ori r11,r11,swsusp_save_area@l
179 _GLOBAL(swsusp_arch_resume)
/*
 * Resume-from-hibernation entry point: with the MMU's data translation
 * disabled, copy the saved pages (restore_pblist) back over the live
 * kernel image, flush/invalidate the L1 to keep the icache coherent,
 * then restore the SPRs/BATs saved by swsusp_arch_suspend and re-enable
 * the MMU.  NOTE(review): this chunk is a partial extraction — many
 * instructions between the visible lines (and the function's tail) are
 * not visible here.
 */
181 #ifdef CONFIG_ALTIVEC
182 /* Stop pending altivec streams and memory accesses */
185 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
189 /* Disable MSR:DR to make sure we don't take a TLB or
190 * hash miss during the copy, as our hash table will
191 * for a while be unusable. For .text, we assume we are
192 * covered by a BAT. This works only for non-G5 at this
193 * point. G5 will need a better approach, possibly using
194 * a small temporary hash table filled with large mappings,
195 * disabling the MMU completely isn't a good option for
196 * performance reasons.
197 * (Note that 750's may have the same performance issue as
198 * the G5 in this case, we should investigate using moving
199 * BATs for these CPUs)
203 rlwinm r0,r0,0,28,26 /* clear MSR_DR */
208 /* Load ptr to the list of pages to copy in r3 */
209 lis r11,(restore_pblist - KERNELBASE)@h
210 ori r11,r11,restore_pblist@l
/* KERNELBASE is subtracted from the high half because data translation
 * is off at this point, so a physical address is needed — confirm the
 * low half needs no adjustment against the full file. */
213 /* Copy the pages. This is a very basic implementation, to
214 * be replaced by something more cache efficient */
219 lwz r11,pbe_address(r3) /* source */
221 lwz r10,pbe_orig_address(r3) /* destination */
239 /* Do a very simple cache flush/inval of the L1 to ensure
240 * coherency of the icache
252 /* Now flush those cache lines */
262 /* Ok, we are now running with the kernel data of the old
263 * kernel fully restored. We can get to the save area
264 * easily now. As for the rest of the code, it assumes the
265 * loader kernel and the booted one are exactly identical
267 lis r11,swsusp_save_area@h
268 ori r11,r11,swsusp_save_area@l
272 /* Restore various CPU config stuffs */
273 bl __restore_cpu_setup
275 /* Restore the BATs, and SDR1. Then we can turn on the MMU.
276 * This is a bit hairy as we are running out of those BATs,
277 * but first, our code is probably in the icache, and we are
278 * writing the same value to the BAT, so that should be fine,
279 * though a better solution will have to be found long-term
/* Reload SPRG1..SPRG3 saved by swsusp_arch_suspend (SL_SPRG0+0 and the
 * paired mtspr instructions are presumably on hidden lines). */
285 lwz r4,SL_SPRG0+4(r11)
287 lwz r4,SL_SPRG0+8(r11)
289 lwz r4,SL_SPRG0+12(r11)
/* Reload the data BATs 0-3 (second word of each SL_DBATn pair). */
295 lwz r4,SL_DBAT0+4(r11)
299 lwz r4,SL_DBAT1+4(r11)
303 lwz r4,SL_DBAT2+4(r11)
307 lwz r4,SL_DBAT3+4(r11)
/* Reload the instruction BATs 0-3. */
311 lwz r4,SL_IBAT0+4(r11)
315 lwz r4,SL_IBAT1+4(r11)
319 lwz r4,SL_IBAT2+4(r11)
323 lwz r4,SL_IBAT3+4(r11)
325 BEGIN_MMU_FTR_SECTION
/* BATs 4-7: restored only on CPUs advertising the high-BATs feature,
 * mirroring the save path in swsusp_arch_suspend. */
328 lwz r4,SL_DBAT4+4(r11)
332 lwz r4,SL_DBAT5+4(r11)
336 lwz r4,SL_DBAT6+4(r11)
340 lwz r4,SL_DBAT7+4(r11)
344 lwz r4,SL_IBAT4+4(r11)
348 lwz r4,SL_IBAT5+4(r11)
352 lwz r4,SL_IBAT6+4(r11)
356 lwz r4,SL_IBAT7+4(r11)
358 END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
/* Loop stepping r4 down by 0x1000 (one 4K page) per iteration — the
 * loop body and its purpose (presumably a TLB/segment flush) are on
 * hidden lines; confirm against the full file. */
363 1: addic. r4,r4,-0x1000
368 /* restore the MSR and turn on the MMU */
381 /* Kick decrementer */
385 /* Restore the callee-saved registers and return */
394 // XXX Note: we don't really need to call swsusp_resume
399 /* FIXME:This construct is actually not useful since we don't shut
400 * down the instruction MMU, we could just flip back MSR-DR on.