/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * This file contains low-level cache management functions
 * used for sleep and CPU speed changes on Apple machines.
 * (In fact the only thing that is Apple-specific is that we assume
 * that we can read from ROM at physical address 0xfff00000.)
 *
 *    Copyright (C) 2004 Paul Mackerras (paulus@samba.org) and
 *                       Benjamin Herrenschmidt (benh@kernel.crashing.org)
 */

#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/cputable.h>
#include <asm/feature-fixups.h>

/*
 * Flush and disable all data caches (dL1, L2, L3). This is used
 * when going to sleep, when doing a PMU based cpufreq transition,
 * or when "offlining" a CPU on SMP machines. This code is over
 * paranoid, but I've had enough issues with various CPU revs and
 * bugs that I decided it was worth being over cautious.
 */

_GLOBAL(flush_disable_caches)
#ifndef CONFIG_PPC_BOOK3S_32
	blr
#else
BEGIN_FTR_SECTION
	b	flush_disable_745x
END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)
BEGIN_FTR_SECTION
	b	flush_disable_75x
END_FTR_SECTION_IFSET(CPU_FTR_L2CR)
	b	__flush_disable_L1

/* This is the code for G3 and 74[01]0 */
flush_disable_75x:
	mfmsr	r11			/* Save MSR in r11 */

	/* Turn off EE and DR in MSR */
	rlwinm	r0,r11,0,~MSR_EE
	rlwinm	r0,r0,0,~MSR_DR
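	/* Sketch (elided in this excerpt): the masked MSR value in r0 still
	 * has to be written back before the cache sequence starts; the usual
	 * form is roughly the following. */
	sync
	mtmsr	r0
	isync
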
	/* Stop DST streams */
BEGIN_FTR_SECTION
	PPC_DSSALL
	sync
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)

	mfspr	r8,SPRN_HID0		/* Save SPRN_HID0 in r8 */
	rlwinm	r4,r8,0,12,10		/* Turn off HID0[DPM] */
	mtspr	SPRN_HID0,r4		/* Disable DPM */

	/* Disp-flush L1. We have a weird problem here that I never
	 * totally figured out. On 750FX, using the ROM for the flush
	 * results in a non-working flush. We use that workaround for
	 * now until I finally understand what's going on. --BenH
	 */

	/* ROM base by default */
	/* RAM base on 750FX */
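	/* The two comments above label an elided base-selection step; a hedged
	 * sketch of it plus the read loop follows. Register choices, the 750FX
	 * PVR value and the flush length are assumptions, not the original
	 * instructions: pick ROM as the flush source unless this is a 750FX,
	 * then read one word per 32-byte line over several times the size of
	 * the L1 data cache so every line gets displaced. */
	lis	r4,0xfff0		/* default: flush by reading ROM */
	mfspr	r3,SPRN_PVR
	srwi	r3,r3,16
	cmplwi	cr0,r3,0x7000		/* assumed 750FX PVR value */
	bne+	1f
	li	r4,0			/* 750FX: flush from RAM instead */
1:	li	r3,0x1000		/* 4096 lines = 128kB, several times dL1 */
	mtctr	r3
2:	lwz	r0,0(r4)		/* each load displaces one 32-byte line */
	addi	r4,r4,32
	bdnz	2b
	sync
	isync
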
	/* Disable / invalidate / enable L1 data */
	mfspr	r3,SPRN_HID0
	rlwinm	r3,r3,0,~(HID0_DCE | HID0_ICE)
	mtspr	SPRN_HID0,r3
	ori	r3,r3,(HID0_DCE|HID0_DCI|HID0_ICE|HID0_ICFI)
	mtspr	SPRN_HID0,r3
	xori	r3,r3,(HID0_DCI|HID0_ICFI)
	mtspr	SPRN_HID0,r3

	/* Get the current enable bit of the L2CR into r4 */
	mfspr	r5,SPRN_L2CR
	/* Set to data-only (pre-745x bit) */
	oris	r3,r5,L2CR_L2DO@h

	/* When disabling L2, code must be in L1 */
1:	mtspr	SPRN_L2CR,r3
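	/* Sketch (elided in this excerpt): the write is followed by sync/isync;
	 * in the original these sit inside a small branch dance so the whole
	 * sequence is already in the I-cache while the L2 is reconfigured. */
	sync
	isync
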
1:	/* disp-flush L2. The interesting thing here is that the L2 can be
	 * up to 2Mb ... so using the ROM, we'll end up wrapping back to memory
	 * but that is probably fine. We disp-flush over 4Mb to be safe.
	 */
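	/* Hedged sketch of the elided 4MB displacement-flush loop (register
	 * choices illustrative): read one word per 32-byte line across 4MB
	 * starting at the ROM base, so every possible L2 line is displaced. */
	lis	r4,2			/* 4MB / 32 bytes = 0x20000 lines */
	mtctr	r4
	lis	r4,0xfff0		/* ROM base; reads wrap back into RAM */
1:	lwz	r0,0(r4)
	addi	r4,r4,32
	bdnz	1b
	sync
	isync
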
	rlwinm	r5,r5,0,~L2CR_L2E

	/* When disabling L2, code must be in L1 */
1:	mtspr	SPRN_L2CR,r5

	/* Invalidate L2. This is pre-745x, we clear the L2I bit ourselves */
	oris	r4,r5,L2CR_L2I@h
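	/* Sketch (elided): write the invalidate bit back and let it settle. */
	mtspr	SPRN_L2CR,r4
	sync
	isync
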
	/* Wait for the invalidation to complete */
1:	mfspr	r3,SPRN_L2CR
	rlwinm.	r0,r3,0,31,31
	bne	1b

	xoris	r4,r4,L2CR_L2I@h
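	/* Sketch (elided): write L2CR back with L2I cleared. */
	sync
	mtspr	SPRN_L2CR,r4
	sync
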
	/* now disable the L1 data cache */
	mfspr	r0,SPRN_HID0
	rlwinm	r0,r0,0,~(HID0_DCE|HID0_ICE)
	mtspr	SPRN_HID0,r0

	/* Restore HID0[DPM] to whatever it was before */
	mfspr	r0,SPRN_HID0
	rlwimi	r0,r8,0,11,11		/* Turn back HID0[DPM] */
	mtspr	SPRN_HID0,r0

	/* restore DR and EE */
	mtmsr	r11
	blr
_ASM_NOKPROBE_SYMBOL(flush_disable_75x)

/* This code is for 745x processors */
flush_disable_745x:
	/* Turn off EE and DR in MSR */
	mfmsr	r11			/* Save MSR in r11 */
	rlwinm	r0,r11,0,~MSR_EE
	rlwinm	r0,r0,0,~MSR_DR
	mtmsr	r0
	isync

	/* Stop prefetch streams */
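	/* Sketch (elided): stop any AltiVec data streams before touching the
	 * caches, as in the 75x path; PPC_DSSALL expands to dssall. */
	PPC_DSSALL
	sync
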
	/* Disable L2 prefetching */
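	/* Hedged sketch of the elided sequence (the L2PFE field position in
	 * MSSCR0 is an assumption here): clear the L2 prefetch enable field,
	 * then issue a few dcbf ops so in-flight prefetches drain before the
	 * caches go away. */
	mfspr	r0,SPRN_MSSCR0
	rlwinm	r0,r0,0,0,29		/* assumed: clear MSSCR0[L2PFE] */
	sync
	mtspr	SPRN_MSSCR0,r0
	sync
	isync
	lis	r4,0
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4
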
	/* Due to a bug with the HW flush on some CPU revs, we occasionally
	 * experience data corruption. I'm adding a displacement flush along
	 * with a dcbf loop over a few Mb to "help". The problem isn't totally
	 * fixed by this in theory, but at least, in practice, I couldn't reproduce
	 * it even with a big hammer...
	 */

	addi	r4,r4,32		/* Go to start of next cache line */
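	/* The addi above is the surviving piece of an elided loop; a hedged
	 * sketch of the whole thing (register choices illustrative): read one
	 * word per 32-byte line across the first 4MB of RAM to displace the
	 * caches. */
	lis	r4,0x0002		/* 4MB / 32 bytes = 0x20000 lines */
	mtctr	r4
	li	r4,0
1:	lwz	r0,0(r4)
	addi	r4,r4,32
	bdnz	1b
	isync
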
	/* Now, flush the first 4MB of memory */
	addi	r4,r4,32		/* Go to start of next cache line */
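	/* Hedged sketch of the elided dcbf pass over the same 4MB range, which
	 * pushes any modified lines back out to memory. */
	lis	r4,0x0002
	mtctr	r4
	li	r4,0
	sync
1:	dcbf	0,r4
	addi	r4,r4,32
	bdnz	1b
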
	/* Flush and disable the L1 data cache */
	mfspr	r6,SPRN_LDSTCR
	lis	r3,0xfff0		/* read from ROM for displacement flush */
	li	r4,0xfe			/* start with only way 0 unlocked */
	li	r5,128			/* 128 lines in each way */
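	/* Hedged sketch of the elided per-way lock step (the DCWL field
	 * position in LDSTCR, bits 24-31, is an assumption): lock every way
	 * except the one currently selected in r4, then displacement-flush
	 * that way with the loop below. */
1:	mtctr	r5
	rlwimi	r6,r4,0,24,31
	mtspr	SPRN_LDSTCR,r6
	sync
	isync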
2:	lwz	r0,0(r3)		/* touch each cache line */
	addi	r3,r3,32
	bdnz	2b
	rlwinm	r4,r4,1,24,30		/* move on to the next way */
	ori	r4,r4,1
	cmpwi	r4,0xff			/* all done? */
	bne	1b
	/* now unlock the L1 data cache */
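	/* Sketch (elided): clear the way locks again and settle LDSTCR. */
	li	r4,0
	rlwimi	r6,r4,0,24,31
	sync
	mtspr	SPRN_LDSTCR,r6
	sync
	isync
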
	/* Flush the L2 cache using the hardware assist */
	mfspr	r3,SPRN_L2CR
	cmpwi	r3,0			/* check if it is enabled first */
	bge	4f
	oris	r0,r3,(L2CR_L2IO_745x|L2CR_L2DO_745x)@h

	/* When disabling/locking L2, code must be in L1 */
1:	mtspr	SPRN_L2CR,r0		/* lock the L2 cache */

	ori	r0,r3,L2CR_L2HWF_745x
	sync
	mtspr	SPRN_L2CR,r0		/* set the hardware flush bit */
3:	mfspr	r0,SPRN_L2CR		/* wait for it to go to 0 */
	andi.	r0,r0,L2CR_L2HWF_745x
	bne	3b
	sync

	rlwinm	r3,r3,0,~L2CR_L2E

	/* When disabling L2, code must be in L1 */
1:	mtspr	SPRN_L2CR,r3		/* disable the L2 cache */

	oris	r4,r3,L2CR_L2I@h
	mtspr	SPRN_L2CR,r4
1:	mfspr	r4,SPRN_L2CR
	andis.	r0,r4,L2CR_L2I@h
	bne	1b

BEGIN_FTR_SECTION
	/* Flush the L3 cache using the hardware assist */
4:	mfspr	r3,SPRN_L3CR
	cmpwi	r3,0			/* check if it is enabled */
	bge	6f
	oris	r0,r3,L3CR_L3IO@h
	ori	r0,r0,L3CR_L3DO
	mtspr	SPRN_L3CR,r0		/* lock the L3 cache */
	sync
	isync
	ori	r0,r0,L3CR_L3HWF
	mtspr	SPRN_L3CR,r0		/* set the hardware flush bit */
5:	mfspr	r0,SPRN_L3CR		/* wait for it to go to zero */
	andi.	r0,r0,L3CR_L3HWF
	bne	5b
	rlwinm	r3,r3,0,~L3CR_L3E
	mtspr	SPRN_L3CR,r3		/* disable the L3 cache */
	ori	r4,r3,L3CR_L3I		/* invalidate the L3 */
	mtspr	SPRN_L3CR,r4
1:	mfspr	r4,SPRN_L3CR
	andi.	r0,r4,L3CR_L3I
	bne	1b
	sync
END_FTR_SECTION_IFSET(CPU_FTR_L3CR)

6:	mfspr	r0,SPRN_HID0		/* now disable the L1 data cache */
	rlwinm	r0,r0,0,~HID0_DCE
	mtspr	SPRN_HID0,r0
	sync
	isync
	mtmsr	r11			/* restore DR and EE */
	blr
_ASM_NOKPROBE_SYMBOL(flush_disable_745x)

#endif	/* CONFIG_PPC_BOOK3S_32 */