/* arch/sparc64/kernel/ktlb.S: Kernel mapping TLB miss handling.
 *
 * Copyright (C) 1995, 1997, 2005, 2008 David S. Miller <davem@davemloft.net>
 * Copyright (C) 1996 Eddie C. Dost (ecd@brainaid.de)
 * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
 * Copyright (C) 1996,98,99 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <asm/head.h>
#include <asm/asi.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tsb.h>
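
/* A note on register conventions, gathered from the comments below:
 * %g4 carries the faulting virtual address, %g5 the PTE once one has
 * been found, and %g6 the TSB tag target computed by the trap vector;
 * %g1, %g2, %g3 and %g7 serve as scratch for the tsb.h macros.
 */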

	.text
	.align	32

kvmap_itlb:
	/* %g6: TAG TARGET */
	mov	TLB_TAG_ACCESS, %g4
	ldxa	[%g4] ASI_IMMU, %g4
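
	/* On sun4u the TLB_TAG_ACCESS MMU register, read via ASI_IMMU,
	 * holds the virtual address (plus context bits) of the access
	 * that missed the I-TLB.
	 */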

	/* The kernel executes in context zero, therefore we do not
	 * need to clear the context ID bits out of %g4 here.
	 */

	/* sun4v_itlb_miss branches here with the missing virtual
	 * address already loaded into %g4
	 */
kvmap_itlb_4v:

	/* Catch kernel NULL pointer calls. */
	sethi	%hi(PAGE_SIZE), %g5
	cmp	%g4, %g5
	blu,pn	%xcc, kvmap_itlb_longpath
	 nop

	KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_itlb_load)
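
	/* KERN_TSB_LOOKUP_TL1 (from asm/tsb.h) hashes %g4 into the kernel
	 * TSB and compares the stored tag with %g6; on a hit it branches
	 * to kvmap_itlb_load with the PTE in %g5, on a miss it falls
	 * through to the range checks below.
	 */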

kvmap_itlb_tsb_miss:
	sethi	%hi(LOW_OBP_ADDRESS), %g5
	cmp	%g4, %g5
	blu,pn	%xcc, kvmap_itlb_vmalloc_addr
	 mov	0x1, %g5
	sllx	%g5, 32, %g5
	cmp	%g4, %g5
	blu,pn	%xcc, kvmap_itlb_obp
	 nop
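
	/* Address classification: below LOW_OBP_ADDRESS is vmalloc/module
	 * space, [LOW_OBP_ADDRESS, 4GB) belongs to the OpenBoot PROM, and
	 * anything higher falls through to the vmalloc handler as well.
	 */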

kvmap_itlb_vmalloc_addr:
	KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_itlb_longpath)
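
	/* The page table walk left the PTE in %g5.  Insert it into the
	 * kernel TSB: TSB_LOCK_TAG marks the entry busy so concurrent
	 * updates cannot tear it, then TSB_WRITE stores PTE and tag.
	 */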
	TSB_LOCK_TAG(%g1, %g2, %g7)
	TSB_WRITE(%g1, %g5, %g6)

	/* fallthrough to TLB load */

kvmap_itlb_load:

661:	stxa	%g5, [%g0] ASI_ITLB_DATA_IN
	retry
	.section	.sun4v_2insn_patch, "ax"
	.word	661b
	nop
	nop
	.previous

	/* For sun4v the ASI_ITLB_DATA_IN store and the retry
	 * instruction get nop'd out and we get here to branch
	 * to the sun4v tlb load code.  The registers are setup
	 * as follows:
	 *
	 * %g4: vaddr
	 * %g5: PTE
	 * %g6: TAG
	 *
	 * The sun4v TLB load wants the PTE in %g3 so we fix that
	 * up here.
	 */
	ba,pt	%xcc, sun4v_itlb_load
	 mov	%g5, %g3

kvmap_itlb_longpath:

661:	rdpr	%pstate, %g5
	wrpr	%g5, PSTATE_AG | PSTATE_MG, %pstate
	.section	.sun4v_2insn_patch, "ax"
	.word	661b
	SET_GL(1)
	nop
	.previous

	rdpr	%tpc, %g5
	ba,pt	%xcc, sparc64_realfault_common
	 mov	FAULT_CODE_ITLB, %g4
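
	/* For an instruction miss the faulting address is the trap PC
	 * itself, hence the rdpr from %tpc above; sparc64_realfault_common
	 * expects the fault code in %g4 and the fault address in %g5.
	 */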

kvmap_itlb_obp:
	OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_itlb_longpath)

	TSB_LOCK_TAG(%g1, %g2, %g7)

	TSB_WRITE(%g1, %g5, %g6)

	ba,pt	%xcc, kvmap_itlb_load
	 nop

kvmap_dtlb_obp:
	OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_dtlb_longpath)

	TSB_LOCK_TAG(%g1, %g2, %g7)

	TSB_WRITE(%g1, %g5, %g6)

	ba,pt	%xcc, kvmap_dtlb_load
	 nop
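
	/* The kernel linear mapping is not backed by page tables here:
	 * kern_linear_pte_xor holds a value that, XORed with the faulting
	 * virtual address, directly yields the PTE for the 4MB linear
	 * mapping (computed into %g5 in the delay slot below).
	 */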
kvmap_dtlb_tsb4m_miss:
	sethi	%hi(kern_linear_pte_xor), %g7
	ldx	[%g7 + %lo(kern_linear_pte_xor)], %g2
	ba,pt	%xcc, kvmap_dtlb_tsb4m_load
	 xor	%g2, %g4, %g5

	.align	32
kvmap_dtlb_tsb4m_load:
	TSB_LOCK_TAG(%g1, %g2, %g7)
	TSB_WRITE(%g1, %g5, %g6)
	ba,pt	%xcc, kvmap_dtlb_load
	 nop

kvmap_dtlb:
	/* %g6: TAG TARGET */
	mov	TLB_TAG_ACCESS, %g4
	ldxa	[%g4] ASI_DMMU, %g4

	/* The kernel executes in context zero, therefore we do not
	 * need to clear the context ID bits out of %g4 here.
	 */

	/* sun4v_dtlb_miss branches here with the missing virtual
	 * address already loaded into %g4
	 */
kvmap_dtlb_4v:
	brgez,pn	%g4, kvmap_dtlb_nonlinear
	 nop
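
	/* Linear kernel addresses sit at the top of the address space with
	 * bit 63 set, so the sign test above suffices: a non-negative %g4
	 * cannot be a linear-mapping address.
	 */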

#ifdef CONFIG_DEBUG_PAGEALLOC
	/* Index through the base page size TSB even for linear
	 * mappings when using page allocation debugging.
	 */
	KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
#else
	/* Correct TAG_TARGET is already in %g6, check 4mb TSB. */
	KERN_TSB4M_LOOKUP_TL1(%g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
#endif
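
	/* CONFIG_DEBUG_PAGEALLOC maps the linear region with base pages so
	 * individual pages can be unmapped to catch stray references, which
	 * is why the base page size TSB is probed instead of the 4MB one.
	 */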

	/* Linear mapping TSB lookup failed.  Fallthrough to kernel
	 * page table based lookup.
	 */
	.globl	kvmap_linear_patch
kvmap_linear_patch:
	ba,a,pt	%xcc, kvmap_linear_early
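
	/* The .globl export and the name suggest kvmap_linear_patch is a
	 * branch patched at run time; as written it routes linear-mapping
	 * TSB misses to kvmap_linear_early.
	 */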

kvmap_dtlb_vmalloc_addr:
	KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath)

	TSB_LOCK_TAG(%g1, %g2, %g7)
	TSB_WRITE(%g1, %g5, %g6)

	/* fallthrough to TLB load */

kvmap_dtlb_load:

661:	stxa	%g5, [%g0] ASI_DTLB_DATA_IN	! Reload TLB
	retry
	.section	.sun4v_2insn_patch, "ax"
	.word	661b
	nop
	nop
	.previous

	/* For sun4v the ASI_DTLB_DATA_IN store and the retry
	 * instruction get nop'd out and we get here to branch
	 * to the sun4v tlb load code.  The registers are setup
	 * as follows:
	 *
	 * %g4: vaddr
	 * %g5: PTE
	 * %g6: TAG
	 *
	 * The sun4v TLB load wants the PTE in %g3 so we fix that
	 * up here.
	 */
	ba,pt	%xcc, sun4v_dtlb_load
	 mov	%g5, %g3

#ifdef CONFIG_SPARSEMEM_VMEMMAP
kvmap_vmemmap:
	KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath)
	ba,a,pt	%xcc, kvmap_dtlb_load
#endif
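
	/* With CONFIG_SPARSEMEM_VMEMMAP the struct page array is virtually
	 * mapped; those addresses are deliberately kept out of the TSB (see
	 * the range check below) and always resolved by a page table walk.
	 */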

kvmap_dtlb_nonlinear:
	/* Catch kernel NULL pointer derefs. */
	sethi	%hi(PAGE_SIZE), %g5
	cmp	%g4, %g5
	bleu,pn	%xcc, kvmap_dtlb_longpath
	 nop

#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/* Do not use the TSB for vmemmap. */
	sethi	%hi(VMEMMAP_BASE), %g5
	ldx	[%g5 + %lo(VMEMMAP_BASE)], %g5
	cmp	%g4, %g5
	bgeu,pn	%xcc, kvmap_vmemmap
	 nop
#endif

	KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)

kvmap_dtlb_tsbmiss:
	sethi	%hi(MODULES_VADDR), %g5
	cmp	%g4, %g5
	blu,pn	%xcc, kvmap_dtlb_longpath
	 sethi	%hi(VMALLOC_END), %g5
	ldx	[%g5 + %lo(VMALLOC_END)], %g5
	cmp	%g4, %g5
	bgeu,pn	%xcc, kvmap_dtlb_longpath
	 nop
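
	/* Only addresses within [MODULES_VADDR, VMALLOC_END) are legitimate
	 * non-linear kernel addresses here; anything outside that window is
	 * a bad dereference and takes the long path.
	 */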

kvmap_check_obp:
	sethi	%hi(LOW_OBP_ADDRESS), %g5
	cmp	%g4, %g5
	blu,pn	%xcc, kvmap_dtlb_vmalloc_addr
	 mov	0x1, %g5
	sllx	%g5, 32, %g5
	cmp	%g4, %g5
	blu,pn	%xcc, kvmap_dtlb_obp
	 nop
	ba,pt	%xcc, kvmap_dtlb_vmalloc_addr
	 nop

kvmap_dtlb_longpath:

661:	rdpr	%pstate, %g5
	wrpr	%g5, PSTATE_AG | PSTATE_MG, %pstate
	.section	.sun4v_2insn_patch, "ax"
	.word	661b
	SET_GL(1)
	ldxa	[%g0] ASI_SCRATCHPAD, %g5
	.previous

	rdpr	%tl, %g3
	cmp	%g3, 1

661:	mov	TLB_TAG_ACCESS, %g4
	ldxa	[%g4] ASI_DMMU, %g5
	.section	.sun4v_2insn_patch, "ax"
	.word	661b
	ldx	[%g5 + HV_FAULT_D_ADDR_OFFSET], %g5
	nop
	.previous
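
	/* On sun4v the patched-in code reads the per-cpu MMU fault status
	 * area pointer from scratchpad register zero and pulls the faulting
	 * address out of it at HV_FAULT_D_ADDR_OFFSET; on sun4u the address
	 * comes from the TLB_TAG_ACCESS register instead.
	 */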

	/* The kernel executes in context zero, therefore we do not
	 * need to clear the context ID bits out of %g5 here.
	 */

	be,pt	%xcc, sparc64_realfault_common
	 mov	FAULT_CODE_DTLB, %g4
	ba,pt	%xcc, winfix_trampoline
	 nop
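
	/* The earlier rdpr %tl / cmp %g3, 1 selects between these two exits:
	 * a miss taken at TL1 goes through the normal fault path, while a
	 * miss from a deeper trap level (e.g. inside a register window
	 * spill/fill handler) must be routed via winfix_trampoline.
	 */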