/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * PARISC TLB and cache flushing support
 * Copyright (C) 2000-2001 Hewlett-Packard (John Marvin)
 * Copyright (C) 2001 Matthew Wilcox (willy at parisc-linux.org)
 * Copyright (C) 2002 Richard Hirst (rhirst with parisc-linux.org)
 */

/*
 * NOTE: fdc,fic, and pdc instructions that use base register modification
 * should only use index and base registers that are not shadowed,
 * so that the fast path emulation in the non access miss handler
 * can be used.
 */
#include <asm/assembly.h>
#include <asm/cache.h>
#include <asm/alternative.h>
#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/pgtable.h>
ENTRY_CFI(flush_tlb_all_local)
	/*
	 * The pitlbe and pdtlbe instructions should only be used to
	 * flush the entire tlb. Also, there needs to be no intervening
	 * tlb operations, e.g. tlb misses, so the operation needs
	 * to happen in real mode with all interruptions disabled.
	 */
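
	/*
	 * Shape of the purge loops below, roughly in C (an illustrative
	 * sketch only; the names are descriptive, not the real cache_info
	 * fields):
	 *
	 *	for (i = 0; i < sid_count; i++, sid += sid_stride) {
	 *		set_sr1(sid);				// mtsp
	 *		for (j = 0, off = off_base; j < off_count; j++) {
	 *			purge_tlb_entry(sr1, off);	// pitlbe/pdtlbe
	 *			off += off_stride;
	 *		}
	 *	}
	 */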
	/* pcxt_ssm_bug - relied upon translation! PA 2.0 Arch. F-4 and F-5 */
	rsm		PSW_SM_I, %r19		/* save I-bit state */
	load32		PA(1f), %r1
	rsm		PSW_SM_Q, %r0		/* prep to load iia queue */
	mtctl		%r0, %cr17		/* Clear IIASQ tail */
	mtctl		%r0, %cr17		/* Clear IIASQ head */
	mtctl		%r1, %cr18		/* IIAOQ head */
	ldo		4(%r1), %r1
	mtctl		%r1, %cr18		/* IIAOQ tail */
	load32		REAL_MODE_PSW, %r1
	mtctl		%r1, %ipsw
	rfi
	nop
1:	load32		PA(cache_info), %r1

	/* Flush Instruction Tlb */

88:	LDREG		ITLB_SID_BASE(%r1), %r20
	LDREG		ITLB_SID_STRIDE(%r1), %r21
	LDREG		ITLB_SID_COUNT(%r1), %r22
	LDREG		ITLB_OFF_BASE(%r1), %arg0
	LDREG		ITLB_OFF_STRIDE(%r1), %arg1
	LDREG		ITLB_OFF_COUNT(%r1), %arg2
	LDREG		ITLB_LOOP(%r1), %arg3
	addib,COND(=)		-1, %arg3, fitoneloop	/* Preadjust and test */
	movb,<,n	%arg3, %r31, fitdone	/* If loop < 0, skip */
	copy		%arg0, %r28		/* Init base addr */
fitmanyloop:					/* Loop if LOOP >= 2 */
	mtsp		%r20, %sr1
	add		%r21, %r20, %r20	/* increment space */
	copy		%arg2, %r29		/* Init middle loop count */
fitmanymiddle:					/* Loop if LOOP >= 2 */
	addib,COND(>)		-1, %r31, fitmanymiddle	/* Adjusted inner loop decr */
	pitlbe		%r0(%sr1, %r28)
	pitlbe,m	%arg1(%sr1, %r28)	/* Last pitlbe and addr adjust */
	addib,COND(>)		-1, %r29, fitmanymiddle	/* Middle loop decr */
	copy		%arg3, %r31		/* Re-init inner loop count */

	movb,tr		%arg0, %r28, fitmanyloop /* Re-init base addr */
	addib,COND(<=),n	-1, %r22, fitdone	/* Outer loop count decr */
fitoneloop:					/* Loop if LOOP = 1 */
	mtsp		%r20, %sr1
	copy		%arg0, %r28		/* init base addr */
	copy		%arg2, %r29		/* init middle loop count */
fitonemiddle:					/* Loop if LOOP = 1 */
	addib,COND(>)		-1, %r29, fitonemiddle	/* Middle loop count decr */
	pitlbe,m	%arg1(%sr1, %r28)	/* pitlbe for one loop */

	addib,COND(>)		-1, %r22, fitoneloop	/* Outer loop count decr */
	add		%r21, %r20, %r20	/* increment space */

fitdone:
	ALTERNATIVE(88b, fitdone, ALT_COND_NO_SPLIT_TLB, INSN_NOP)
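	/* On CPUs without a split I/D TLB the 88b..fitdone range above is
	 * patched to NOPs at boot: the pdtlbe loop below then purges the
	 * unified TLB on its own (see ALTERNATIVE in asm/alternative.h). */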

	/* Flush Data Tlb */

	LDREG		DTLB_SID_BASE(%r1), %r20
	LDREG		DTLB_SID_STRIDE(%r1), %r21
	LDREG		DTLB_SID_COUNT(%r1), %r22
	LDREG		DTLB_OFF_BASE(%r1), %arg0
	LDREG		DTLB_OFF_STRIDE(%r1), %arg1
	LDREG		DTLB_OFF_COUNT(%r1), %arg2
	LDREG		DTLB_LOOP(%r1), %arg3
	addib,COND(=)		-1, %arg3, fdtoneloop	/* Preadjust and test */
	movb,<,n	%arg3, %r31, fdtdone	/* If loop < 0, skip */
	copy		%arg0, %r28		/* Init base addr */
fdtmanyloop:					/* Loop if LOOP >= 2 */
	mtsp		%r20, %sr1
	add		%r21, %r20, %r20	/* increment space */
	copy		%arg2, %r29		/* Init middle loop count */
fdtmanymiddle:					/* Loop if LOOP >= 2 */
	addib,COND(>)		-1, %r31, fdtmanymiddle	/* Adjusted inner loop decr */
	pdtlbe		%r0(%sr1, %r28)
	pdtlbe,m	%arg1(%sr1, %r28)	/* Last pdtlbe and addr adjust */
	addib,COND(>)		-1, %r29, fdtmanymiddle	/* Middle loop decr */
	copy		%arg3, %r31		/* Re-init inner loop count */

	movb,tr		%arg0, %r28, fdtmanyloop /* Re-init base addr */
	addib,COND(<=),n	-1, %r22, fdtdone	/* Outer loop count decr */
fdtoneloop:					/* Loop if LOOP = 1 */
	mtsp		%r20, %sr1
	copy		%arg0, %r28		/* init base addr */
	copy		%arg2, %r29		/* init middle loop count */
fdtonemiddle:					/* Loop if LOOP = 1 */
	addib,COND(>)		-1, %r29, fdtonemiddle	/* Middle loop count decr */
	pdtlbe,m	%arg1(%sr1, %r28)	/* pdtlbe for one loop */

	addib,COND(>)		-1, %r22, fdtoneloop	/* Outer loop count decr */
	add		%r21, %r20, %r20	/* increment space */

fdtdone:
	/*
	 * Switch back to virtual mode
	 */
	rsm		PSW_SM_I, %r0		/* prep to load iia queue */
	load32		2f, %r1

	rsm		PSW_SM_Q, %r0		/* prep to load iia queue */
	mtctl		%r0, %cr17		/* Clear IIASQ tail */
	mtctl		%r0, %cr17		/* Clear IIASQ head */
	mtctl		%r1, %cr18		/* IIAOQ head */
	ldo		4(%r1), %r1
	mtctl		%r1, %cr18		/* IIAOQ tail */
	load32		KERNEL_PSW, %r1
	or		%r1, %r19, %r1		/* I-bit to state on entry */
	mtctl		%r1, %ipsw		/* restore I-bit (entire PSW) */
	rfi
	nop

2:	bv		%r0(%r2)
	nop

	/*
	 * When running in qemu, drop whole flush_tlb_all_local function and
	 * replace by one pdtlbe instruction, for which QEMU will drop all
	 * TLB entries at once.
	 */
3:	pdtlbe		%r0(%sr1,%r0)
	bv,n		%r0(%r2)
	ALTERNATIVE_CODE(flush_tlb_all_local, 2, ALT_COND_RUN_ON_QEMU, 3b)
ENDPROC_CFI(flush_tlb_all_local)

	.import cache_info, data

ENTRY_CFI(flush_instruction_cache_local)
88:	load32		cache_info, %r1

	/* Flush Instruction Cache */

	LDREG		ICACHE_BASE(%r1), %arg0
	LDREG		ICACHE_STRIDE(%r1), %arg1
	LDREG		ICACHE_COUNT(%r1), %arg2
	LDREG		ICACHE_LOOP(%r1), %arg3
	rsm		PSW_SM_I, %r22		/* No mmgt ops during loop */
	mtsp		%r0, %sr1
	addib,COND(=)		-1, %arg3, fioneloop	/* Preadjust and test */
	movb,<,n	%arg3, %r31, fisync	/* If loop < 0, do sync */

fimanyloop:					/* Loop if LOOP >= 2 */
	addib,COND(>)		-1, %r31, fimanyloop	/* Adjusted inner loop decr */
	fice		%r0(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)	/* Last fice and addr adjust */
	movb,tr		%arg3, %r31, fimanyloop	/* Re-init inner loop count */
	addib,COND(<=),n	-1, %arg2, fisync	/* Outer loop decr */

fioneloop:					/* Loop if LOOP = 1 */
	/* Some implementations may flush with a single fice instruction */
	cmpib,COND(>>=),n	15, %arg2, fioneloop2

fioneloop1:
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	addib,COND(>)		-16, %arg2, fioneloop1
	fice,m		%arg1(%sr1, %arg0)

	cmpb,COND(=),n	%arg2, %r0, fisync	/* Predict branch taken */

fioneloop2:
	addib,COND(>)		-1, %arg2, fioneloop2	/* Outer loop count decr */
	fice,m		%arg1(%sr1, %arg0)	/* Fice for one loop */
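
	/*
	 * Control flow of fioneloop1/fioneloop2 above, roughly in C (an
	 * illustrative sketch only):
	 *
	 *	while (count > 15) {			// unrolled by 16
	 *		for (i = 0; i < 16; i++, addr += stride)
	 *			fice(addr);
	 *		count -= 16;
	 *	}
	 *	while (count-- > 0) {			// remainder
	 *		fice(addr);
	 *		addr += stride;
	 *	}
	 */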

fisync:
	sync
	mtsm		%r22			/* restore I-bit */
89:	ALTERNATIVE(88b, 89b, ALT_COND_NO_ICACHE, INSN_NOP)
	bv		%r0(%r2)
	nop
ENDPROC_CFI(flush_instruction_cache_local)

	.import cache_info, data

ENTRY_CFI(flush_data_cache_local)
88:	load32		cache_info, %r1

	/* Flush Data Cache */

	LDREG		DCACHE_BASE(%r1), %arg0
	LDREG		DCACHE_STRIDE(%r1), %arg1
	LDREG		DCACHE_COUNT(%r1), %arg2
	LDREG		DCACHE_LOOP(%r1), %arg3
	rsm		PSW_SM_I, %r22		/* No mmgt ops during loop */
	mtsp		%r0, %sr1
	addib,COND(=)		-1, %arg3, fdoneloop	/* Preadjust and test */
	movb,<,n	%arg3, %r31, fdsync	/* If loop < 0, do sync */

fdmanyloop:					/* Loop if LOOP >= 2 */
	addib,COND(>)		-1, %r31, fdmanyloop	/* Adjusted inner loop decr */
	fdce		%r0(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)	/* Last fdce and addr adjust */
	movb,tr		%arg3, %r31, fdmanyloop	/* Re-init inner loop count */
	addib,COND(<=),n	-1, %arg2, fdsync	/* Outer loop decr */

fdoneloop:					/* Loop if LOOP = 1 */
	/* Some implementations may flush with a single fdce instruction */
	cmpib,COND(>>=),n	15, %arg2, fdoneloop2

fdoneloop1:
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	addib,COND(>)		-16, %arg2, fdoneloop1
	fdce,m		%arg1(%sr1, %arg0)

	cmpb,COND(=),n	%arg2, %r0, fdsync	/* Predict branch taken */

fdoneloop2:
	addib,COND(>)		-1, %arg2, fdoneloop2	/* Outer loop count decr */
	fdce,m		%arg1(%sr1, %arg0)	/* Fdce for one loop */

fdsync:
	syncdma
	sync
	mtsm		%r22			/* restore I-bit */
89:	ALTERNATIVE(88b, 89b, ALT_COND_NO_DCACHE, INSN_NOP)
	bv		%r0(%r2)
	nop
ENDPROC_CFI(flush_data_cache_local)

/* Clear page using kernel mapping. */

ENTRY_CFI(clear_page_asm)
#ifdef CONFIG_64BIT

	/* Unroll the loop. */
	ldi		(PAGE_SIZE / 128), %r1
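
	/*
	 * Each pass of the loop below clears 128 bytes with sixteen
	 * 8-byte stores; roughly, in C (an illustrative sketch only):
	 *
	 *	u64 *p = page;
	 *	for (i = PAGE_SIZE / 128; i > 0; i--, p += 16)
	 *		for (j = 0; j < 16; j++)
	 *			p[j] = 0;
	 */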
	/* Note reverse branch hint for addib is taken. */
	addib,COND(>),n	-1, %r1, 1b
	ldo		128(%r26), %r26

#else

	/*
	 * Note that until (if) we start saving the full 64-bit register
	 * values on interrupt, we can't use std on a 32 bit kernel.
	 */
	ldi		(PAGE_SIZE / 64), %r1
	addib,COND(>),n	-1, %r1, 1b
	ldo		64(%r26), %r26
#endif
	bv		%r0(%r2)
	nop
ENDPROC_CFI(clear_page_asm)

/* Copy page using kernel mapping. */

ENTRY_CFI(copy_page_asm)
#ifdef CONFIG_64BIT
	/* PA8x00 CPUs can consume 2 loads or 1 store per cycle.
	 * Unroll the loop by hand and arrange insn appropriately.
	 * Prefetch doesn't improve performance on rp3440.
	 * GCC probably can do this just as well...
	 */
	ldi		(PAGE_SIZE / 128), %r1
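
	/*
	 * Each pass of the loop below copies 128 bytes, with the loads
	 * scheduled ahead of the stores that consume them; roughly, in C
	 * (an illustrative sketch only):
	 *
	 *	u64 *d = dst, *s = src;
	 *	for (i = PAGE_SIZE / 128; i > 0; i--, s += 16, d += 16)
	 *		for (j = 0; j < 16; j++)
	 *			d[j] = s[j];
	 */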
	/* Note reverse branch hint for addib is taken. */
	addib,COND(>),n	-1, %r1, 1b
	ldd		0(%r25), %r19		/* start next loads */

#else

	/*
	 * This loop is optimized for PCXL/PCXL2 ldw/ldw and stw/stw
	 * bundles (very restricted rules for bundling).
	 * Note that until (if) we start saving
	 * the full 64 bit register values on interrupt, we can't
	 * use ldd/std on a 32 bit kernel.
	 */
	ldi		(PAGE_SIZE / 64), %r1
	addib,COND(>),n	-1, %r1, 1b
	ldo		64(%r25), %r25
#endif
	bv		%r0(%r2)
	nop
ENDPROC_CFI(copy_page_asm)

/*
 * NOTE: Code in clear_user_page has a hard coded dependency on the
 * maximum alias boundary being 4 Mb. We've been assured by the
 * parisc chip designers that there will not ever be a parisc
 * chip with a larger alias boundary (Never say never :-) ).
 *
 * Yah, what about the PA8800 and PA8900 processors?
 *
 * Subtle: the dtlb miss handlers support the temp alias region by
 * "knowing" that if a dtlb miss happens within the temp alias
 * region it must have occurred while in clear_user_page. Since
 * this routine makes use of processor local translations, we
 * don't want to insert them into the kernel page table. Instead,
 * we load up some general registers (they need to be registers
 * which aren't shadowed) with the physical page numbers (preshifted
 * for tlb insertion) needed to insert the translations. When we
 * miss on the translation, the dtlb miss handler inserts the
 * translation into the tlb using these values:
 *
 * %r26 physical address of "to" translation
 * %r23 physical address of "from" translation
 *
 * copy_user_page_asm() performs a page copy using mappings
 * equivalent to the user page mappings. It can be used to
 * implement copy_user_page() but unfortunately both the `from'
 * and `to' pages need to be flushed through mappings equivalent
 * to the user mappings after the copy because the kernel accesses
 * the `from' page through the kmap kernel mapping and the `to'
 * page needs to be flushed since code can be copied. As a
 * result, this implementation is less efficient than the simpler
 * copy using the kernel mapping. It only needs the `from' page
 * to be flushed via the user mapping. The kunmap routines handle
 * the flushes needed for the kernel mapping.
 *
 * I'm still keeping this around because it may be possible to
 * use it if more information is passed into copy_user_page().
 * Have to do some measurements to see if it is worthwhile to
 * lobby for such a change.
 */
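
/*
 * Sketch of the alias-address arithmetic done by the dep_safe/depi_safe
 * sequences below (illustrative C only; vaddr is the user virtual address
 * whose cache colour the temporary mapping must match):
 *
 *	mask  = (1UL << TMPALIAS_SIZE_BITS) - 1;
 *	alias = TMPALIAS_MAP_START | (vaddr & mask & ~(PAGE_SIZE - 1));
 */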

ENTRY_CFI(copy_user_page_asm)
	/* Convert virtual `to' and `from' addresses to physical addresses.
	   Move `from' physical address to non shadowed register. */
	ldil		L%(__PAGE_OFFSET), %r1
	sub		%r26, %r1, %r26
	sub		%r25, %r1, %r23

	ldil		L%(TMPALIAS_MAP_START), %r28
	dep_safe	%r24, 31,TMPALIAS_SIZE_BITS, %r28	/* Form aliased virtual address 'to' */
	depi_safe	0, 31,PAGE_SHIFT, %r28			/* Clear any offset bits */
	copy		%r28, %r29
	depi_safe	1, 31-TMPALIAS_SIZE_BITS,1, %r29	/* Form aliased virtual address 'from' */

	/* Purge any old translations */

#ifdef CONFIG_PA20
	pdtlb,l		%r0(%r28)
	pdtlb,l		%r0(%r29)
#else
0:	pdtlb		%r0(%r28)
1:	pdtlb		%r0(%r29)
	ALTERNATIVE(0b, 0b+4, ALT_COND_NO_SMP, INSN_PxTLB)
	ALTERNATIVE(1b, 1b+4, ALT_COND_NO_SMP, INSN_PxTLB)
#endif

#ifdef CONFIG_64BIT
	/* PA8x00 CPUs can consume 2 loads or 1 store per cycle.
	 * Unroll the loop by hand and arrange insn appropriately.
	 * GCC probably can do this just as well.
	 */
	ldi		(PAGE_SIZE / 128), %r1

	/* conditional branches nullify on forward taken branch, and on
	 * non-taken backward branch. Note that .+4 is a backwards branch.
	 * The ldd should only get executed if the branch is taken.
	 */
	addib,COND(>),n	-1, %r1, 1b		/* bundle 10 */
	ldd		0(%r29), %r19		/* start next loads */

#else
	ldi		(PAGE_SIZE / 64), %r1

	/*
	 * This loop is optimized for PCXL/PCXL2 ldw/ldw and stw/stw
	 * bundles (very restricted rules for bundling). It probably
	 * does OK on PCXU and better, but we could do better with
	 * ldd/std instructions. Note that until (if) we start saving
	 * the full 64 bit register values on interrupt, we can't
	 * use ldd/std on a 32 bit kernel.
	 */
	addib,COND(>)		-1, %r1, 1b
	ldo		64(%r29), %r29
#endif
	bv		%r0(%r2)
	nop
ENDPROC_CFI(copy_user_page_asm)

ENTRY_CFI(clear_user_page_asm)
	ldil		L%(__PAGE_OFFSET), %r1
	sub		%r26, %r1, %r26

	ldil		L%(TMPALIAS_MAP_START), %r28
	dep_safe	%r25, 31,TMPALIAS_SIZE_BITS, %r28	/* Form aliased virtual address 'to' */
	depi_safe	0, 31,PAGE_SHIFT, %r28			/* Clear any offset bits */

	/* Purge any old translation */

#ifdef CONFIG_PA20
	pdtlb,l		%r0(%r28)
#else
0:	pdtlb		%r0(%r28)
	ALTERNATIVE(0b, 0b+4, ALT_COND_NO_SMP, INSN_PxTLB)
#endif

#ifdef CONFIG_64BIT
	ldi		(PAGE_SIZE / 128), %r1

	/* PREFETCH (Write) has not (yet) been proven to help here */
	/* #define PREFETCHW_OP	ldd 256(%0), %r0 */

	addib,COND(>)		-1, %r1, 1b
	ldo		128(%r28), %r28

#else /* ! CONFIG_64BIT */
	ldi		(PAGE_SIZE / 64), %r1

	addib,COND(>)		-1, %r1, 1b
	ldo		64(%r28), %r28
#endif /* CONFIG_64BIT */

	bv		%r0(%r2)
	nop
ENDPROC_CFI(clear_user_page_asm)

ENTRY_CFI(flush_dcache_page_asm)
	ldil		L%(TMPALIAS_MAP_START), %r28
	dep_safe	%r25, 31,TMPALIAS_SIZE_BITS, %r28	/* Form aliased virtual address 'to' */
	depi_safe	0, 31,PAGE_SHIFT, %r28			/* Clear any offset bits */

	/* Purge any old translation */

#ifdef CONFIG_PA20
	pdtlb,l		%r0(%r28)
#else
0:	pdtlb		%r0(%r28)
	ALTERNATIVE(0b, 0b+4, ALT_COND_NO_SMP, INSN_PxTLB)
#endif

88:	ldil		L%dcache_stride, %r1
	ldw		R%dcache_stride(%r1), %r31

#ifdef CONFIG_64BIT
	depdi,z		1, 63-PAGE_SHIFT,1, %r25
#else
	depwi,z		1, 31-PAGE_SHIFT,1, %r25
#endif
	add		%r28, %r25, %r25
	sub		%r25, %r31, %r25
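
	/*
	 * The loop below walks the aliased page one cache line at a time;
	 * roughly, in C (an illustrative sketch only; the compare is
	 * unsigned, as cmpb,>>):
	 *
	 *	end = alias + PAGE_SIZE - stride;
	 *	do {
	 *		fdc(alias);		// flush one data cache line
	 *		alias += stride;
	 *	} while (end > alias);
	 */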
	cmpb,COND(>>)	%r25, %r28, 1b		/* predict taken */
	fdc,m		%r31(%r28)

89:	ALTERNATIVE(88b, 89b, ALT_COND_NO_DCACHE, INSN_NOP)
	sync
	bv		%r0(%r2)
	nop
ENDPROC_CFI(flush_dcache_page_asm)

ENTRY_CFI(purge_dcache_page_asm)
	ldil		L%(TMPALIAS_MAP_START), %r28
	dep_safe	%r25, 31,TMPALIAS_SIZE_BITS, %r28	/* Form aliased virtual address 'to' */
	depi_safe	0, 31,PAGE_SHIFT, %r28			/* Clear any offset bits */

	/* Purge any old translation */

#ifdef CONFIG_PA20
	pdtlb,l		%r0(%r28)
#else
0:	pdtlb		%r0(%r28)
	ALTERNATIVE(0b, 0b+4, ALT_COND_NO_SMP, INSN_PxTLB)
#endif

88:	ldil		L%dcache_stride, %r1
	ldw		R%dcache_stride(%r1), %r31

#ifdef CONFIG_64BIT
	depdi,z		1, 63-PAGE_SHIFT,1, %r25
#else
	depwi,z		1, 31-PAGE_SHIFT,1, %r25
#endif
	add		%r28, %r25, %r25
	sub		%r25, %r31, %r25
	cmpb,COND(>>)	%r25, %r28, 1b		/* predict taken */
	pdc,m		%r31(%r28)

89:	ALTERNATIVE(88b, 89b, ALT_COND_NO_DCACHE, INSN_NOP)
	sync
	bv		%r0(%r2)
	nop
ENDPROC_CFI(purge_dcache_page_asm)

ENTRY_CFI(flush_icache_page_asm)
	ldil		L%(TMPALIAS_MAP_START), %r28
	dep_safe	%r25, 31,TMPALIAS_SIZE_BITS, %r28	/* Form aliased virtual address 'to' */
	depi_safe	0, 31,PAGE_SHIFT, %r28			/* Clear any offset bits */

	/* Purge any old translation. Note that the FIC instruction
	 * may use either the instruction or data TLB. Given that we
	 * have a flat address space, it's not clear which TLB will be
	 * used. So, we purge both entries. */
#ifdef CONFIG_PA20
	pdtlb,l		%r0(%r28)
1:	pitlb,l		%r0(%sr4,%r28)
	ALTERNATIVE(1b, 1b+4, ALT_COND_NO_SPLIT_TLB, INSN_NOP)
#else
0:	pdtlb		%r0(%r28)
1:	pitlb		%r0(%sr4,%r28)
	ALTERNATIVE(0b, 0b+4, ALT_COND_NO_SMP, INSN_PxTLB)
	ALTERNATIVE(1b, 1b+4, ALT_COND_NO_SMP, INSN_PxTLB)
	ALTERNATIVE(1b, 1b+4, ALT_COND_NO_SPLIT_TLB, INSN_NOP)
#endif

88:	ldil		L%icache_stride, %r1
	ldw		R%icache_stride(%r1), %r31

#ifdef CONFIG_64BIT
	depdi,z		1, 63-PAGE_SHIFT,1, %r25
#else
	depwi,z		1, 31-PAGE_SHIFT,1, %r25
#endif
	add		%r28, %r25, %r25
	sub		%r25, %r31, %r25

	/* fic only has the type 26 form on PA1.1, requiring an
	 * explicit space specification, so use %sr4 */
1:	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	cmpb,COND(>>)	%r25, %r28, 1b		/* predict taken */
	fic,m		%r31(%sr4,%r28)

89:	ALTERNATIVE(88b, 89b, ALT_COND_NO_ICACHE, INSN_NOP)
	sync
	bv		%r0(%r2)
	nop
ENDPROC_CFI(flush_icache_page_asm)

ENTRY_CFI(flush_kernel_dcache_page_asm)
88:	ldil		L%dcache_stride, %r1
	ldw		R%dcache_stride(%r1), %r23
	depi_safe	0, 31,PAGE_SHIFT, %r26	/* Clear any offset bits */

#ifdef CONFIG_64BIT
	depdi,z		1, 63-PAGE_SHIFT,1, %r25
#else
	depwi,z		1, 31-PAGE_SHIFT,1, %r25
#endif
	add		%r26, %r25, %r25
	sub		%r25, %r23, %r25
	cmpb,COND(>>)	%r25, %r26, 1b		/* predict taken */
	fdc,m		%r23(%r26)

89:	ALTERNATIVE(88b, 89b, ALT_COND_NO_DCACHE, INSN_NOP)
	sync
	bv		%r0(%r2)
	nop
ENDPROC_CFI(flush_kernel_dcache_page_asm)

ENTRY_CFI(purge_kernel_dcache_page_asm)
88:	ldil		L%dcache_stride, %r1
	ldw		R%dcache_stride(%r1), %r23
	depi_safe	0, 31,PAGE_SHIFT, %r26	/* Clear any offset bits */

#ifdef CONFIG_64BIT
	depdi,z		1, 63-PAGE_SHIFT,1, %r25
#else
	depwi,z		1, 31-PAGE_SHIFT,1, %r25
#endif
	add		%r26, %r25, %r25
	sub		%r25, %r23, %r25
	cmpb,COND(>>)	%r25, %r26, 1b		/* predict taken */
	pdc,m		%r23(%r26)

89:	ALTERNATIVE(88b, 89b, ALT_COND_NO_DCACHE, INSN_NOP)
	sync
	bv		%r0(%r2)
	nop
ENDPROC_CFI(purge_kernel_dcache_page_asm)

ENTRY_CFI(flush_user_dcache_range_asm)
88:	ldil		L%dcache_stride, %r1
	ldw		R%dcache_stride(%r1), %r23
	ldo		-1(%r23), %r21
	ANDCM		%r26, %r21, %r26

#ifdef CONFIG_64BIT
	depd,z		%r23, 59, 60, %r21
#else
	depw,z		%r23, 27, 28, %r21
#endif
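
	/*
	 * Shape of the range flush below, roughly in C (an illustrative
	 * sketch only; all comparisons are unsigned):
	 *
	 *	start &= ~(stride - 1);			// align to a line
	 *	while (start + 16 * stride <= end)	// unrolled by 16
	 *		for (i = 0; i < 16; i++, start += stride)
	 *			fdc(start);
	 *	while (end > start) {			// remainder
	 *		fdc(start);
	 *		start += stride;
	 *	}
	 */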
	add		%r26, %r21, %r22
	cmpb,COND(>>),n	%r22, %r25, 2f		/* predict not taken */
1:	add		%r22, %r21, %r22
	fdc,m		%r23(%sr3, %r26)
	fdc,m		%r23(%sr3, %r26)
	fdc,m		%r23(%sr3, %r26)
	fdc,m		%r23(%sr3, %r26)
	fdc,m		%r23(%sr3, %r26)
	fdc,m		%r23(%sr3, %r26)
	fdc,m		%r23(%sr3, %r26)
	fdc,m		%r23(%sr3, %r26)
	fdc,m		%r23(%sr3, %r26)
	fdc,m		%r23(%sr3, %r26)
	fdc,m		%r23(%sr3, %r26)
	fdc,m		%r23(%sr3, %r26)
	fdc,m		%r23(%sr3, %r26)
	fdc,m		%r23(%sr3, %r26)
	fdc,m		%r23(%sr3, %r26)
	cmpb,COND(<<=)	%r22, %r25, 1b		/* predict taken */
	fdc,m		%r23(%sr3, %r26)

2:	cmpb,COND(>>),n	%r25, %r26, 2b
	fdc,m		%r23(%sr3, %r26)

89:	ALTERNATIVE(88b, 89b, ALT_COND_NO_DCACHE, INSN_NOP)
	sync
	bv		%r0(%r2)
	nop
ENDPROC_CFI(flush_user_dcache_range_asm)

ENTRY_CFI(flush_kernel_dcache_range_asm)
88:	ldil		L%dcache_stride, %r1
	ldw		R%dcache_stride(%r1), %r23
	ldo		-1(%r23), %r21
	ANDCM		%r26, %r21, %r26

#ifdef CONFIG_64BIT
	depd,z		%r23, 59, 60, %r21
#else
	depw,z		%r23, 27, 28, %r21
#endif
	add		%r26, %r21, %r22
	cmpb,COND(>>),n	%r22, %r25, 2f		/* predict not taken */
1:	add		%r22, %r21, %r22
	cmpb,COND(<<=)	%r22, %r25, 1b		/* predict taken */
	fdc,m		%r23(%r26)

2:	cmpb,COND(>>),n	%r25, %r26, 2b		/* predict taken */
	fdc,m		%r23(%r26)

	sync
89:	ALTERNATIVE(88b, 89b, ALT_COND_NO_DCACHE, INSN_NOP)
	bv		%r0(%r2)
	nop
ENDPROC_CFI(flush_kernel_dcache_range_asm)

ENTRY_CFI(purge_kernel_dcache_range_asm)
88:	ldil		L%dcache_stride, %r1
	ldw		R%dcache_stride(%r1), %r23
	ldo		-1(%r23), %r21
	ANDCM		%r26, %r21, %r26

#ifdef CONFIG_64BIT
	depd,z		%r23, 59, 60, %r21
#else
	depw,z		%r23, 27, 28, %r21
#endif
	add		%r26, %r21, %r22
	cmpb,COND(>>),n	%r22, %r25, 2f		/* predict not taken */
1:	add		%r22, %r21, %r22
	cmpb,COND(<<=)	%r22, %r25, 1b		/* predict taken */
	pdc,m		%r23(%r26)

2:	cmpb,COND(>>),n	%r25, %r26, 2b		/* predict taken */
	pdc,m		%r23(%r26)

	sync
89:	ALTERNATIVE(88b, 89b, ALT_COND_NO_DCACHE, INSN_NOP)
	bv		%r0(%r2)
	nop
ENDPROC_CFI(purge_kernel_dcache_range_asm)

ENTRY_CFI(flush_user_icache_range_asm)
88:	ldil		L%icache_stride, %r1
	ldw		R%icache_stride(%r1), %r23
	ldo		-1(%r23), %r21
	ANDCM		%r26, %r21, %r26

#ifdef CONFIG_64BIT
	depd,z		%r23, 59, 60, %r21
#else
	depw,z		%r23, 27, 28, %r21
#endif
	add		%r26, %r21, %r22
	cmpb,COND(>>),n	%r22, %r25, 2f		/* predict not taken */
1:	add		%r22, %r21, %r22
	fic,m		%r23(%sr3, %r26)
	fic,m		%r23(%sr3, %r26)
	fic,m		%r23(%sr3, %r26)
	fic,m		%r23(%sr3, %r26)
	fic,m		%r23(%sr3, %r26)
	fic,m		%r23(%sr3, %r26)
	fic,m		%r23(%sr3, %r26)
	fic,m		%r23(%sr3, %r26)
	fic,m		%r23(%sr3, %r26)
	fic,m		%r23(%sr3, %r26)
	fic,m		%r23(%sr3, %r26)
	fic,m		%r23(%sr3, %r26)
	fic,m		%r23(%sr3, %r26)
	fic,m		%r23(%sr3, %r26)
	fic,m		%r23(%sr3, %r26)
	cmpb,COND(<<=)	%r22, %r25, 1b		/* predict taken */
	fic,m		%r23(%sr3, %r26)

2:	cmpb,COND(>>),n	%r25, %r26, 2b
	fic,m		%r23(%sr3, %r26)

89:	ALTERNATIVE(88b, 89b, ALT_COND_NO_ICACHE, INSN_NOP)
	sync
	bv		%r0(%r2)
	nop
ENDPROC_CFI(flush_user_icache_range_asm)

ENTRY_CFI(flush_kernel_icache_page)
88:	ldil		L%icache_stride, %r1
	ldw		R%icache_stride(%r1), %r23

#ifdef CONFIG_64BIT
	depdi,z		1, 63-PAGE_SHIFT,1, %r25
#else
	depwi,z		1, 31-PAGE_SHIFT,1, %r25
#endif
	add		%r26, %r25, %r25
	sub		%r25, %r23, %r25
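
	/* %r25 now holds the page end minus one stride, so the compare-and-
	 * branch below stops one line early; the final fic,m in the branch
	 * delay slot then flushes that last line. */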
1:	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	cmpb,COND(>>)	%r25, %r26, 1b		/* predict taken */
	fic,m		%r23(%sr4, %r26)

89:	ALTERNATIVE(88b, 89b, ALT_COND_NO_ICACHE, INSN_NOP)
	sync
	bv		%r0(%r2)
	nop
ENDPROC_CFI(flush_kernel_icache_page)

ENTRY_CFI(flush_kernel_icache_range_asm)
88:	ldil		L%icache_stride, %r1
	ldw		R%icache_stride(%r1), %r23
	ldo		-1(%r23), %r21
	ANDCM		%r26, %r21, %r26

#ifdef CONFIG_64BIT
	depd,z		%r23, 59, 60, %r21
#else
	depw,z		%r23, 27, 28, %r21
#endif
	add		%r26, %r21, %r22
	cmpb,COND(>>),n	%r22, %r25, 2f		/* predict not taken */
1:	add		%r22, %r21, %r22
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	cmpb,COND(<<=)	%r22, %r25, 1b		/* predict taken */
	fic,m		%r23(%sr4, %r26)

2:	cmpb,COND(>>),n	%r25, %r26, 2b		/* predict taken */
	fic,m		%r23(%sr4, %r26)

89:	ALTERNATIVE(88b, 89b, ALT_COND_NO_ICACHE, INSN_NOP)
	sync
	bv		%r0(%r2)
	nop
ENDPROC_CFI(flush_kernel_icache_range_asm)

	/* align should cover use of rfi in disable_sr_hashing_asm and
	 * srdis_done.
	 */
	.align	256
ENTRY_CFI(disable_sr_hashing_asm)
	/*
	 * Switch to real mode
	 */

	rsm		PSW_SM_I, %r0
	load32		PA(1f), %r1
	rsm		PSW_SM_Q, %r0		/* prep to load iia queue */
	mtctl		%r0, %cr17		/* Clear IIASQ tail */
	mtctl		%r0, %cr17		/* Clear IIASQ head */
	mtctl		%r1, %cr18		/* IIAOQ head */
	ldo		4(%r1), %r1
	mtctl		%r1, %cr18		/* IIAOQ tail */
	load32		REAL_MODE_PSW, %r1
	mtctl		%r1, %ipsw
	rfi
	nop

1:	cmpib,=,n	SRHASH_PCXST, %r26, srdis_pcxs
	cmpib,=,n	SRHASH_PCXL, %r26, srdis_pcxl
	cmpib,=,n	SRHASH_PA20, %r26, srdis_pa20
	b,n		srdis_done
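
	/*
	 * Each srdis_* variant below has the same shape; roughly, in C (an
	 * illustrative sketch only, with descriptive names):
	 *
	 *	d = mfdiag(DR);			// read diagnose register
	 *	d &= ~HASH_ENABLE_BITS;		// clear SR hash enable bit(s)
	 *	mtdiag(DR, d);			// write it back
	 *
	 * The raw .word encodings are used because the assembler does not
	 * accept the diagnose instructions directly.
	 */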

srdis_pcxs:

	/* Disable Space Register Hashing for PCXS,PCXT,PCXT' */

	.word		0x141c1a00		/* mfdiag %dr0, %r28 */
	.word		0x141c1a00		/* must issue twice */
	depwi		0,18,1, %r28		/* Clear DHE (dcache hash enable) */
	depwi		0,20,1, %r28		/* Clear IHE (icache hash enable) */
	.word		0x141c1600		/* mtdiag %r28, %dr0 */
	.word		0x141c1600		/* must issue twice */
	b,n		srdis_done

srdis_pcxl:

	/* Disable Space Register Hashing for PCXL */

	.word		0x141c0600		/* mfdiag %dr0, %r28 */
	depwi		0,28,2, %r28		/* Clear DHASH_EN & IHASH_EN */
	.word		0x141c0240		/* mtdiag %r28, %dr0 */
	b,n		srdis_done

srdis_pa20:

	/* Disable Space Register Hashing for PCXU,PCXU+,PCXW,PCXW+,PCXW2 */

	.word		0x144008bc		/* mfdiag %dr2, %r28 */
	depdi		0, 54,1, %r28		/* clear DIAG_SPHASH_ENAB (bit 54) */
	.word		0x145c1840		/* mtdiag %r28, %dr2 */

srdis_done:

	/* Switch back to virtual mode */
	rsm		PSW_SM_I, %r0		/* prep to load iia queue */
	load32		2f, %r1
	rsm		PSW_SM_Q, %r0		/* prep to load iia queue */
	mtctl		%r0, %cr17		/* Clear IIASQ tail */
	mtctl		%r0, %cr17		/* Clear IIASQ head */
	mtctl		%r1, %cr18		/* IIAOQ head */
	ldo		4(%r1), %r1
	mtctl		%r1, %cr18		/* IIAOQ tail */
	load32		KERNEL_PSW, %r1
	mtctl		%r1, %ipsw		/* restore I-bit (entire PSW) */
	rfi
	nop

2:	bv		%r0(%r2)
	nop
ENDPROC_CFI(disable_sr_hashing_asm)