1 /* $Id: ultra.S,v 1.72 2002/02/09 19:49:31 davem Exp $
2 * ultra.S: Don't expand these all over the place...
4 * Copyright (C) 1997, 2000 David S. Miller (davem@redhat.com)
7 #include <linux/config.h>
9 #include <asm/pgtable.h>
11 #include <asm/spitfire.h>
12 #include <asm/mmu_context.h>
15 #include <asm/thread_info.h>
16 #include <asm/cacheflush.h>
18 /* Basically, most of the Spitfire vs. Cheetah madness
19 * has to do with the fact that Cheetah does not support
20 * IMMU flushes out of the secondary context. Someone needs
21 * to throw a south lake birthday party for the folks
22 * in Microelectronics who refused to fix this shit.
25 /* This file is meant to be read efficiently by the CPU, not humans.
26 * (Translated from Polish, roughly: "Try hard not to break this for anyone...")
/* Flush all TLB entries for one MM context via the SECONDARY context
 * register.  Reads the live secondary context, and if it does not match
 * the requested one, falls through to the slow Spitfire path which
 * temporarily installs the context before demapping.
 * NOTE(review): this copy of the file is truncated — the compare that
 * sets %icc and the setup of %g3 (demap-context operand) are not
 * visible here; confirm against the full source.
 */
31 __flush_tlb_mm: /* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */
32 ldxa [%o1] ASI_DMMU, %g2 ! %g2 = current secondary context
34 bne,pn %icc, __spitfire_flush_tlb_mm_slow ! ctx mismatch -> slow path
36 stxa %g0, [%g3] ASI_DMMU_DEMAP ! demap context from D-TLB
37 stxa %g0, [%g3] ASI_IMMU_DEMAP ! demap context from I-TLB
50 .globl __flush_tlb_pending
/* Flush a batch of pending page translations for one context.
 * Disables interrupts (PSTATE_IE), temporarily installs %o0 into the
 * SECONDARY_CONTEXT register, demaps each vaddr in the array (I-TLB
 * demap is skipped for data-only entries via the 2: label), then
 * restores the previous context and PSTATE.
 * NOTE(review): truncated copy — the wrpr that applies the masked
 * %pstate, the vaddr load, and the loop branch are not visible here.
 */
52 /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
55 andn %g7, PSTATE_IE, %g2 ! mask off interrupt-enable
57 mov SECONDARY_CONTEXT, %o4
58 ldxa [%o4] ASI_DMMU, %g2 ! save current secondary context
59 stxa %o0, [%o4] ASI_DMMU ! install target context
60 1: sub %o1, (1 << 3), %o1 ! step back one 8-byte vaddr slot
66 stxa %g0, [%o3] ASI_IMMU_DEMAP ! demap page from I-TLB
67 2: stxa %g0, [%o3] ASI_DMMU_DEMAP ! demap page from D-TLB
71 stxa %g2, [%o4] ASI_DMMU ! restore saved context
74 wrpr %g7, 0x0, %pstate ! restore interrupts
77 .globl __flush_tlb_kernel_range
/* Demap every page of a kernel (nucleus context) virtual range from
 * both TLBs, one PAGE_SIZE step at a time.
 * NOTE(review): truncated copy — the range-length computation into %o3
 * and the loop-decrement/branch lines are not visible here.
 */
78 __flush_tlb_kernel_range: /* %o0=start, %o1=end */
81 sethi %hi(PAGE_SIZE), %o4
84 or %o0, 0x20, %o0 ! Nucleus
85 1: stxa %g0, [%o0 + %o3] ASI_DMMU_DEMAP ! demap page from D-TLB
86 stxa %g0, [%o0 + %o3] ASI_IMMU_DEMAP ! demap page from I-TLB
/* Slow path for __flush_tlb_mm on Spitfire: with interrupts disabled,
 * install the target context into the context register (%o1 points at
 * SECONDARY_CONTEXT), demap that context from both TLBs, then restore
 * the previously saved context (%g2).
 * NOTE(review): truncated copy — the retl/delay-slot epilogue restoring
 * %pstate from %g1 is not visible here.
 */
93 __spitfire_flush_tlb_mm_slow:
95 wrpr %g1, PSTATE_IE, %pstate ! disable interrupts (saved pstate in %g1)
96 stxa %o0, [%o1] ASI_DMMU ! install target context
97 stxa %g0, [%g3] ASI_DMMU_DEMAP ! demap context from D-TLB
98 stxa %g0, [%g3] ASI_IMMU_DEMAP ! demap context from I-TLB
100 stxa %g2, [%o1] ASI_DMMU ! restore previous context
106 * The following code flushes one page_size worth.
/* ITAG_MASK selects the I-cache tag-compare bits for the supported
 * page sizes; any other PAGE_SIZE is a build error.
 */
108 #if (PAGE_SHIFT == 13)
109 #define ITAG_MASK 0xfe
110 #elif (PAGE_SHIFT == 16)
111 #define ITAG_MASK 0x7fe
113 #error unsupported PAGE_SIZE
/* Flush one page worth of I-cache lines for the given physical page.
 * %o0 is masked down to a page-aligned physical address, then the loop
 * at 1: walks the page in 32-byte cache-line steps.
 * NOTE(review): truncated copy — the tag read/compare/flush body of the
 * loop and the epilogue are not visible here.
 */
116 .globl __flush_icache_page
117 __flush_icache_page: /* %o0 = phys_page */
119 srlx %o0, PAGE_SHIFT, %o0 ! page-align the physical address...
120 sethi %uhi(PAGE_OFFSET), %g1
121 sllx %o0, PAGE_SHIFT, %o0 ! ...by shifting down and back up
122 sethi %hi(PAGE_SIZE), %g2 ! %g2 = bytes remaining
125 1: subcc %g2, 32, %g2 ! one 32-byte line per iteration
131 #ifdef DCACHE_ALIASING_POSSIBLE
/* D-cache flush-by-tag is only implemented for 8K pages. */
133 #if (PAGE_SHIFT != 13)
134 #error only page shift of 13 is supported by dcache flush
137 #define DTAG_MASK 0x3
/* Flush all D-cache lines whose tag matches the given kernel address,
 * then optionally chain to __flush_icache_page.
 *
 * The loop at 1: reads four D-cache tags per iteration (software
 * pipelined, scheduling annotated per-instruction with the issue
 * group comments), masks off the DTAG_MASK status bits, and compares
 * each against %o0.  On a hit it branches (annulled) to the matching
 * dflushN stub, which rewinds %o4 to the hit line, zeroes that tag
 * (and, by falling through the later stubs, the remaining lines of
 * the group) via ASI_DCACHE_TAG, and returns to the 2: continuation.
 * %o2 = loop terminator (D$ size, 1 << 14 = 16K).
 * NOTE(review): truncated copy — the %o4 initialization, the membar /
 * branch-back from the dflushN stubs, and the epilogue are not fully
 * visible here; confirm against the full source.
 */
140 .globl __flush_dcache_page
141 __flush_dcache_page: /* %o0=kaddr, %o1=flush_icache */
142 sethi %uhi(PAGE_OFFSET), %g1
147 sethi %hi(1 << 14), %o2 ! D$ size == 16K
148 1: ldxa [%o4] ASI_DCACHE_TAG, %o3 ! LSU Group
149 add %o4, (1 << 5), %o4 ! IEU0
150 ldxa [%o4] ASI_DCACHE_TAG, %g1 ! LSU Group
151 add %o4, (1 << 5), %o4 ! IEU0
152 ldxa [%o4] ASI_DCACHE_TAG, %g2 ! LSU Group o3 available
153 add %o4, (1 << 5), %o4 ! IEU0
154 andn %o3, DTAG_MASK, %o3 ! IEU1
155 ldxa [%o4] ASI_DCACHE_TAG, %g3 ! LSU Group
156 add %o4, (1 << 5), %o4 ! IEU0
157 andn %g1, DTAG_MASK, %g1 ! IEU1
158 cmp %o0, %o3 ! IEU1 Group
159 be,a,pn %xcc, dflush1 ! CTI
160 sub %o4, (4 << 5), %o4 ! IEU0 (Group)
161 cmp %o0, %g1 ! IEU1 Group
162 andn %g2, DTAG_MASK, %g2 ! IEU0
163 be,a,pn %xcc, dflush2 ! CTI
164 sub %o4, (3 << 5), %o4 ! IEU0 (Group)
165 cmp %o0, %g2 ! IEU1 Group
166 andn %g3, DTAG_MASK, %g3 ! IEU0
167 be,a,pn %xcc, dflush3 ! CTI
168 sub %o4, (2 << 5), %o4 ! IEU0 (Group)
169 cmp %o0, %g3 ! IEU1 Group
170 be,a,pn %xcc, dflush4 ! CTI
171 sub %o4, (1 << 5), %o4 ! IEU0
172 2: cmp %o4, %o2 ! IEU1 Group
173 bne,pt %xcc, 1b ! CTI
176 /* The I-cache does not snoop local stores so we
177 * better flush that too when necessary.
179 brnz,pt %o1, __flush_icache_page ! tail-chain to I$ flush if requested
/* Tag write-back stubs: zero the matching line's tag, stepping forward
 * one 32-byte line each, then (in the full source) rejoin the loop at 2:.
 */
184 dflush1:stxa %g0, [%o4] ASI_DCACHE_TAG
185 add %o4, (1 << 5), %o4
186 dflush2:stxa %g0, [%o4] ASI_DCACHE_TAG
187 add %o4, (1 << 5), %o4
188 dflush3:stxa %g0, [%o4] ASI_DCACHE_TAG
189 add %o4, (1 << 5), %o4
190 dflush4:stxa %g0, [%o4] ASI_DCACHE_TAG
191 add %o4, (1 << 5), %o4
195 #endif /* DCACHE_ALIASING_POSSIBLE */
/* NOTE(review): the __prefill_dtlb: and __prefill_itlb: labels appear
 * to have been lost to truncation in this copy — what follows are the
 * bodies of the two TLB prefill helpers used by __update_mmu_cache.
 * Each, with interrupts disabled, writes the faulting virtual address
 * into TLB_TAG_ACCESS and then pushes the PTE (%o2) into the
 * corresponding TLB's data-in register, letting hardware pick a slot.
 */
200 wrpr %g7, PSTATE_IE, %pstate ! disable interrupts
201 mov TLB_TAG_ACCESS, %g1
202 stxa %o5, [%g1] ASI_DMMU ! set D-TLB tag = faulting vaddr
203 stxa %o2, [%g0] ASI_DTLB_DATA_IN ! load PTE into D-TLB
/* I-TLB variant of the same sequence. */
209 wrpr %g7, PSTATE_IE, %pstate ! disable interrupts
210 mov TLB_TAG_ACCESS, %g1
211 stxa %o5, [%g1] ASI_IMMU ! set I-TLB tag = faulting vaddr
212 stxa %o2, [%g0] ASI_ITLB_DATA_IN ! load PTE into I-TLB
217 .globl __update_mmu_cache
/* Preload the freshly established PTE into the appropriate TLB after a
 * fault: page-align the faulting address into %o5, then dispatch on
 * FAULT_CODE_DTLB to either the D-TLB or I-TLB prefill helper.
 */
218 __update_mmu_cache: /* %o0=hw_context, %o1=address, %o2=pte, %o3=fault_code */
219 srlx %o1, PAGE_SHIFT, %o1 ! page-align the address...
220 andcc %o3, FAULT_CODE_DTLB, %g0 ! was it a D-TLB fault?
221 sllx %o1, PAGE_SHIFT, %o5 ! ...into %o5
222 bne,pt %xcc, __prefill_dtlb ! D-TLB fault -> D-TLB prefill
224 ba,a,pt %xcc, __prefill_itlb ! otherwise I-TLB prefill
226 /* Cheetah specific versions, patched at boot time.
228 * The writes of the PRIMARY_CONTEXT register in this file are
229 * safe even on Cheetah+ and later wrt. the page size fields.
230 * The nucleus page size fields do not matter because we make
231 * no data references, and these instructions execute out of a
232 * locked I-TLB entry sitting in the fully associative I-TLB.
233 * This sequence should also never trap.
/* Cheetah replacement for __flush_tlb_mm (patched in by
 * cheetah_patch_cachetlbops; must stay within 15 instructions).
 * Cheetah cannot demap out of the secondary context, so this uses
 * PRIMARY_CONTEXT instead: save it, install the target context,
 * demap it from both TLBs, restore.  Interrupts disabled throughout.
 */
235 __cheetah_flush_tlb_mm: /* 15 insns */
237 andn %g7, PSTATE_IE, %g2 ! mask off interrupt-enable
238 wrpr %g2, 0x0, %pstate ! interrupts off
240 mov PRIMARY_CONTEXT, %o2
242 ldxa [%o2] ASI_DMMU, %g2 ! save current primary context
243 stxa %o0, [%o2] ASI_DMMU ! install target context
244 stxa %g0, [%g3] ASI_DMMU_DEMAP ! demap context from D-TLB
245 stxa %g0, [%g3] ASI_IMMU_DEMAP ! demap context from I-TLB
246 stxa %g2, [%o2] ASI_DMMU ! restore primary context
250 wrpr %g7, 0x0, %pstate ! restore interrupts
/* Cheetah replacement for __flush_tlb_pending (patched in at boot;
 * must stay within 22 instructions).  Same structure as the generic
 * version but operates on PRIMARY_CONTEXT, since Cheetah cannot demap
 * from the secondary context.
 * NOTE(review): truncated copy — the vaddr load and loop branch are
 * not visible here.
 */
252 __cheetah_flush_tlb_pending: /* 22 insns */
253 /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
256 andn %g7, PSTATE_IE, %g2 ! mask off interrupt-enable
257 wrpr %g2, 0x0, %pstate ! interrupts off
259 mov PRIMARY_CONTEXT, %o4
260 ldxa [%o4] ASI_DMMU, %g2 ! save current primary context
261 stxa %o0, [%o4] ASI_DMMU ! install target context
262 1: sub %o1, (1 << 3), %o1 ! step back one 8-byte vaddr slot
267 stxa %g0, [%o3] ASI_IMMU_DEMAP ! demap page from I-TLB
268 2: stxa %g0, [%o3] ASI_DMMU_DEMAP ! demap page from D-TLB
271 stxa %g2, [%o4] ASI_DMMU ! restore primary context
275 wrpr %g7, 0x0, %pstate ! restore interrupts
277 #ifdef DCACHE_ALIASING_POSSIBLE
/* Cheetah replacement for __flush_dcache_page (patched in at boot;
 * must stay within 11 instructions).  Cheetah has a displacement-free
 * invalidate ASI, so the whole page is invalidated with a simple
 * backwards loop over 32-byte lines — no tag compares needed.
 */
278 flush_dcpage_cheetah: /* 11 insns */
279 sethi %uhi(PAGE_OFFSET), %g1
282 sethi %hi(PAGE_SIZE), %o4 ! %o4 = byte offset, counts down
283 1: subcc %o4, (1 << 5), %o4 ! one 32-byte line per iteration
284 stxa %g0, [%o0 + %o4] ASI_DCACHE_INVALIDATE
288 retl /* I-cache flush never needed on Cheetah, see callers. */
290 #endif /* DCACHE_ALIASING_POSSIBLE */
303 .globl cheetah_patch_cachetlbops
/* Boot-time patcher: overwrites the generic Spitfire cache/TLB flush
 * routines in place with their Cheetah-specific variants.  For each
 * pair it loads the destination (%o0) and source (%o1) addresses and
 * calls cheetah_patch_one to copy the instructions.
 * NOTE(review): truncated copy — the call delay slots (which pass the
 * instruction count to cheetah_patch_one) and the prologue/epilogue
 * are not visible here.
 */
304 cheetah_patch_cachetlbops:
307 sethi %hi(__flush_tlb_mm), %o0
308 or %o0, %lo(__flush_tlb_mm), %o0 ! %o0 = patch destination
309 sethi %hi(__cheetah_flush_tlb_mm), %o1
310 or %o1, %lo(__cheetah_flush_tlb_mm), %o1 ! %o1 = patch source
311 call cheetah_patch_one
314 sethi %hi(__flush_tlb_pending), %o0
315 or %o0, %lo(__flush_tlb_pending), %o0
316 sethi %hi(__cheetah_flush_tlb_pending), %o1
317 or %o1, %lo(__cheetah_flush_tlb_pending), %o1
318 call cheetah_patch_one
321 #ifdef DCACHE_ALIASING_POSSIBLE
322 sethi %hi(__flush_dcache_page), %o0
323 or %o0, %lo(__flush_dcache_page), %o0
324 sethi %hi(flush_dcpage_cheetah), %o1
325 or %o1, %lo(flush_dcpage_cheetah), %o1
326 call cheetah_patch_one
328 #endif /* DCACHE_ALIASING_POSSIBLE */
334 /* These are all called by the slaves of a cross call, at
335 * trap level 1, with interrupts fully disabled.
338 * %g5 mm->context (all tlb flushes)
339 * %g1 address arg 1 (tlb page and range flushes)
340 * %g7 address arg 2 (tlb range flush only)
342 * %g6 ivector table, don't touch
347 * TODO: Make xcall TLB range flushes use the tricks above... -DaveM
350 .globl xcall_flush_tlb_mm
/* Cross-call slave: flush all TLB entries for the context in %g5.
 * Saves PRIMARY_CONTEXT, installs the target context, demaps it from
 * both TLBs, restores the old context.
 * NOTE(review): truncated copy — the xcall_flush_tlb_mm: label line
 * and the setup of %g4 (demap operand) are not visible here.
 */
352 mov PRIMARY_CONTEXT, %g2
354 ldxa [%g2] ASI_DMMU, %g3 ! save current primary context
355 stxa %g5, [%g2] ASI_DMMU ! install target context
356 stxa %g0, [%g4] ASI_DMMU_DEMAP ! demap context from D-TLB
357 stxa %g0, [%g4] ASI_IMMU_DEMAP ! demap context from I-TLB
358 stxa %g3, [%g2] ASI_DMMU ! restore primary context
361 .globl xcall_flush_tlb_pending
/* Cross-call slave: flush a batch of pending page translations.
 * Same structure as __flush_tlb_pending, but runs at TL1 with the
 * cross-call register convention (%g5/%g1/%g7) and PRIMARY_CONTEXT.
 * NOTE(review): truncated copy — the vaddr load and loop branch are
 * not visible here.
 */
362 xcall_flush_tlb_pending:
363 /* %g5=context, %g1=nr, %g7=vaddrs[] */
365 mov PRIMARY_CONTEXT, %g4
366 ldxa [%g4] ASI_DMMU, %g2 ! save current primary context
367 stxa %g5, [%g4] ASI_DMMU ! install target context
368 1: sub %g1, (1 << 3), %g1 ! step back one 8-byte vaddr slot
374 stxa %g0, [%g5] ASI_IMMU_DEMAP ! demap page from I-TLB
375 2: stxa %g0, [%g5] ASI_DMMU_DEMAP ! demap page from D-TLB
379 stxa %g2, [%g4] ASI_DMMU ! restore primary context
382 .globl xcall_flush_tlb_kernel_range
/* Cross-call slave: demap a kernel (nucleus) virtual range from both
 * TLBs on this CPU, one PAGE_SIZE step at a time.
 * NOTE(review): truncated copy — the range-length computation into %g3
 * and the loop-decrement/branch lines are not visible here.
 */
383 xcall_flush_tlb_kernel_range:
384 sethi %hi(PAGE_SIZE - 1), %g2
385 or %g2, %lo(PAGE_SIZE - 1), %g2 ! %g2 = page-offset mask
391 or %g1, 0x20, %g1 ! Nucleus
392 1: stxa %g0, [%g1 + %g3] ASI_DMMU_DEMAP ! demap page from D-TLB
393 stxa %g0, [%g1 + %g3] ASI_IMMU_DEMAP ! demap page from I-TLB
401 /* This runs in a very controlled environment, so we do
402 * not need to worry about BH races etc.
404 .globl xcall_sync_tick
/* Cross-call slave: synchronize this CPU's tick counter with the
 * master by trapping into C (smp_synchronize_tick_client) via an
 * etrap-like sequence.
 * NOTE(review): truncated copy — the xcall_sync_tick: label line and
 * most of the etrap/rtrap scaffolding are not visible here.
 */
407 wrpr %g2, PSTATE_IG | PSTATE_AG, %pstate ! switch globals for etrap
412 109: or %g7, %lo(109b), %g7 ! %g7 = return PC for etrap
413 call smp_synchronize_tick_client
417 ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1 ! reload tstate for rtrap
419 /* NOTE: This is SPECIAL!! We do etrap/rtrap however
420 * we choose to deal with the "BH's run with
421 * %pil==15" problem (described in asm/pil.h)
422 * by just invoking rtrap directly past where
423 * BH's are checked for.
425 * We do it like this because we do not want %pil==15
426 * lockups to prevent regs being reported.
428 .globl xcall_report_regs
/* Cross-call slave: dump this CPU's registers by calling into C with
 * a pt_regs pointer, then return via rtrap past the BH checks (see
 * comment above).
 * NOTE(review): truncated copy — the xcall_report_regs: label line,
 * the call into C, and the branch to rtrap are not visible here.
 */
431 wrpr %g2, PSTATE_IG | PSTATE_AG, %pstate ! switch globals for etrap
436 109: or %g7, %lo(109b), %g7 ! %g7 = return PC for etrap
438 add %sp, PTREGS_OFF, %o0 ! arg: pt_regs pointer
440 /* Has to be a non-v9 branch due to the large distance. */
442 ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1 ! reload tstate for rtrap
444 #ifdef DCACHE_ALIASING_POSSIBLE
446 .globl xcall_flush_dcache_page_cheetah
/* Cross-call slave (Cheetah): invalidate one physical page's worth of
 * D-cache lines using the displacement-free invalidate ASI, looping
 * backwards in 32-byte line steps.
 */
447 xcall_flush_dcache_page_cheetah: /* %g1 == physical page address */
448 sethi %hi(PAGE_SIZE), %g3 ! %g3 = byte offset, counts down
449 1: subcc %g3, (1 << 5), %g3 ! one 32-byte line per iteration
450 stxa %g0, [%g1 + %g3] ASI_DCACHE_INVALIDATE
456 #endif /* DCACHE_ALIASING_POSSIBLE */
458 .globl xcall_flush_dcache_page_spitfire
/* Cross-call slave (Spitfire): flush one page from the D-cache by
 * scanning all 16K of D-cache tags and zeroing those that match the
 * page's tag comparator; afterwards (when the page has a mapping,
 * %g5 != 0) the I-cache is flushed as well by walking the kernel
 * virtual address in 32-byte steps.
 * NOTE(review): truncated copy — the tag compare/branch inside the
 * 1: loop and the I-cache flush instruction are not visible here.
 */
459 xcall_flush_dcache_page_spitfire: /* %g1 == physical page address
460 %g7 == kernel page virtual address
461 %g5 == (page->mapping != NULL) */
462 #ifdef DCACHE_ALIASING_POSSIBLE
463 srlx %g1, (13 - 2), %g1 ! Form tag comparator
464 sethi %hi(L1DCACHE_SIZE), %g3 ! D$ size == 16K
465 sub %g3, (1 << 5), %g3 ! D$ linesize == 32
466 1: ldxa [%g3] ASI_DCACHE_TAG, %g2 ! read tag for this line
474 stxa %g0, [%g3] ASI_DCACHE_TAG ! matched: zero the tag
478 sub %g3, (1 << 5), %g3 ! previous cache line
481 #endif /* DCACHE_ALIASING_POSSIBLE */
482 sethi %hi(PAGE_SIZE), %g3 ! I$ flush: walk the page...
485 subcc %g3, (1 << 5), %g3
487 add %g7, (1 << 5), %g7 ! ...via the kernel vaddr
493 .globl xcall_promstop
/* Cross-call slave: drop this CPU into the PROM and stop it.
 * NOTE(review): truncated copy — the xcall_promstop: label line, the
 * call into the prom code, and the final spin loop are not visible.
 */
496 wrpr %g2, PSTATE_IG | PSTATE_AG, %pstate ! switch globals for etrap
501 109: or %g7, %lo(109b), %g7 ! %g7 = return PC for etrap
505 /* We should not return, just spin if we do... */
516 /* These two are not performance critical... */
517 .globl xcall_flush_tlb_all_spitfire
/* Cross-call slave: flush every non-locked entry from both TLBs on a
 * Spitfire CPU by walking the TLB entries with the diagnostic
 * data-access ASIs.  Entries with _PAGE_L (locked) set are preserved.
 * Each diagnostic access is preceded by the Errata #32 workaround
 * store.  Loops until SPITFIRE_HIGHEST_LOCKED_TLBENT is reached.
 * NOTE(review): truncated copy — the entry-index stepping, the skip
 * branches on %g5, and the loop-back branches are not visible here.
 */
518 xcall_flush_tlb_all_spitfire:
519 /* Spitfire Errata #32 workaround. */
520 sethi %hi(errata32_hwbug), %g4
521 stx %g0, [%g4 + %lo(errata32_hwbug)]
525 1: ldxa [%g3] ASI_DTLB_DATA_ACCESS, %g4 ! read D-TLB entry
526 and %g4, _PAGE_L, %g5 ! locked entries are kept
528 mov TLB_TAG_ACCESS, %g7
530 stxa %g0, [%g7] ASI_DMMU ! clear tag...
532 stxa %g0, [%g3] ASI_DTLB_DATA_ACCESS ! ...and data
535 /* Spitfire Errata #32 workaround. */
536 sethi %hi(errata32_hwbug), %g4
537 stx %g0, [%g4 + %lo(errata32_hwbug)]
539 2: ldxa [%g3] ASI_ITLB_DATA_ACCESS, %g4 ! read I-TLB entry
540 and %g4, _PAGE_L, %g5 ! locked entries are kept
542 mov TLB_TAG_ACCESS, %g7
544 stxa %g0, [%g7] ASI_IMMU ! clear tag...
546 stxa %g0, [%g3] ASI_ITLB_DATA_ACCESS ! ...and data
549 /* Spitfire Errata #32 workaround. */
550 sethi %hi(errata32_hwbug), %g4
551 stx %g0, [%g4 + %lo(errata32_hwbug)]
554 cmp %g2, SPITFIRE_HIGHEST_LOCKED_TLBENT ! done all entries?
560 .globl xcall_flush_tlb_all_cheetah
/* Cross-call slave: flush both TLBs on a Cheetah CPU with a single
 * demap-all operation per MMU (%g2 holds the demap-all operand, set
 * up on a line not visible in this truncated copy).
 */
561 xcall_flush_tlb_all_cheetah:
563 stxa %g0, [%g2] ASI_DMMU_DEMAP ! demap-all D-TLB
564 stxa %g0, [%g2] ASI_IMMU_DEMAP ! demap-all I-TLB
567 /* These just get rescheduled to PIL vectors. */
/* Cross-call slaves that merely raise a software interrupt at the
 * corresponding PIL and return; the real work runs later at that PIL.
 * NOTE(review): truncated copy — the xcall_call_function: and capture
 * label lines and the return sequences are not visible here.
 */
568 .globl xcall_call_function
570 wr %g0, (1 << PIL_SMP_CALL_FUNC), %set_softint ! schedule call-function
573 .globl xcall_receive_signal
574 xcall_receive_signal:
575 wr %g0, (1 << PIL_SMP_RECEIVE_SIGNAL), %set_softint ! schedule signal recv
580 wr %g0, (1 << PIL_SMP_CAPTURE), %set_softint ! schedule cpu capture
583 #endif /* CONFIG_SMP */