/* arch/sparc64/kernel/ktlb.S: Kernel mapping TLB miss handling.
 *
 * Copyright (C) 1995, 1997, 2005, 2008 David S. Miller <davem@davemloft.net>
 * Copyright (C) 1996 Eddie C. Dost        (ecd@brainaid.de)
 * Copyright (C) 1996 Miguel de Icaza      (miguel@nuclecu.unam.mx)
 * Copyright (C) 1996,98,99 Jakub Jelinek  (jj@sunsite.mff.cuni.cz)
 */

#include <asm/head.h>
#include <asm/asi.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tsb.h>

	.text
	.align		32

kvmap_itlb:
	/* g6: TAG TARGET */
	mov		TLB_TAG_ACCESS, %g4
	ldxa		[%g4] ASI_IMMU, %g4

	/* sun4v_itlb_miss branches here with the missing virtual
	 * address already loaded into %g4
	 */
kvmap_itlb_4v:

kvmap_itlb_nonlinear:
	/* Catch kernel NULL pointer calls.  */
	sethi		%hi(PAGE_SIZE), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_itlb_longpath
	 nop

	KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_itlb_load)

kvmap_itlb_tsb_miss:
	sethi		%hi(LOW_OBP_ADDRESS), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_itlb_vmalloc_addr
	 mov		0x1, %g5
	sllx		%g5, 32, %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_itlb_obp
	 nop

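	/* A rough C sketch of the classification above (control flow is
	 * illustrative; the constants are the real ones from the headers):
	 *
	 *	if (vaddr < LOW_OBP_ADDRESS)
	 *		goto vmalloc_addr;	// vmalloc/module text
	 *	else if (vaddr < (1UL << 32))
	 *		goto obp;		// OBP firmware range
	 *	else
	 *		goto vmalloc_addr;
	 */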
kvmap_itlb_vmalloc_addr:
	KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_itlb_longpath)

	TSB_LOCK_TAG(%g1, %g2, %g7)

	/* Load and check PTE.  */
	ldxa		[%g5] ASI_PHYS_USE_EC, %g5
	mov		1, %g7
	sllx		%g7, TSB_TAG_INVALID_BIT, %g7
	brgez,a,pn	%g5, kvmap_itlb_longpath
	 TSB_STORE(%g1, %g7)

	TSB_WRITE(%g1, %g5, %g6)

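	/* What the check above does, as a C sketch (assuming the usual
	 * sparc64 convention that PTE bit 63 is the valid bit, so brgez
	 * catches the invalid case; tsb_insert() is a hypothetical name):
	 *
	 *	pte = *(u64 *)__va(pte_paddr);
	 *	if ((s64)pte >= 0) {		// invalid PTE
	 *		tsb->tag = 1UL << TSB_TAG_INVALID_BIT;
	 *		goto longpath;
	 *	}
	 *	tsb_insert(tsb, tag, pte);
	 */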
	/* fallthrough to TLB load */

kvmap_itlb_load:

661:	stxa		%g5, [%g0] ASI_ITLB_DATA_IN
	retry
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	nop
	nop
	.previous

	/* For sun4v the ASI_ITLB_DATA_IN store and the retry
	 * instruction get nop'd out and we get here to branch
	 * to the sun4v tlb load code.  The registers are setup
	 * as follows:
	 *
	 * %g4: vaddr
	 * %g5: PTE
	 * %g6: TAG
	 *
	 * The sun4v TLB load wants the PTE in %g3 so we fix that
	 * up here.
	 */
	ba,pt		%xcc, sun4v_itlb_load
	 mov		%g5, %g3

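	/* The boot-time patch record behind the 661: sections, sketched
	 * in C (this mirrors struct sun4v_2insn_patch_entry; treat the
	 * field layout here as an illustration, not the authoritative
	 * definition):
	 *
	 *	struct sun4v_2insn_patch_entry {
	 *		unsigned int	addr;		// insn to patch
	 *		unsigned int	insns[2];	// replacement pair
	 *	};
	 *
	 * On sun4v the kernel walks .sun4v_2insn_patch at boot and
	 * overwrites the two instructions at each recorded address.
	 */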
kvmap_itlb_longpath:

661:	rdpr	%pstate, %g5
	wrpr	%g5, PSTATE_AG | PSTATE_MG, %pstate
	.section .sun4v_2insn_patch, "ax"
	.word	661b
	SET_GL(1)
	nop
	.previous

	rdpr	%tpc, %g5
	ba,pt	%xcc, sparc64_realfault_common
	 mov	FAULT_CODE_ITLB, %g4

kvmap_itlb_obp:
	OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_itlb_longpath)

	TSB_LOCK_TAG(%g1, %g2, %g7)

	TSB_WRITE(%g1, %g5, %g6)

	ba,pt		%xcc, kvmap_itlb_load
	 nop

kvmap_dtlb_obp:
	OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_dtlb_longpath)

	TSB_LOCK_TAG(%g1, %g2, %g7)

	TSB_WRITE(%g1, %g5, %g6)

	ba,pt		%xcc, kvmap_dtlb_load
	 nop

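	/* OBP_TRANS_LOOKUP resolves firmware addresses against the
	 * translations captured from the PROM at boot.  A hedged C
	 * sketch of the idea (prom_trans[] is the real table; the loop
	 * shape here is illustrative):
	 *
	 *	for (i = 0; i < prom_trans_ents; i++) {
	 *		struct linux_prom_translation *p = &prom_trans[i];
	 *		if (vaddr >= p->virt && vaddr < p->virt + p->size)
	 *			return p->data + (vaddr - p->virt);
	 *	}
	 *	goto longpath;
	 */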
	.align		32
kvmap_dtlb_tsb4m_load:
	TSB_LOCK_TAG(%g1, %g2, %g7)
	TSB_WRITE(%g1, %g5, %g6)
	ba,pt		%xcc, kvmap_dtlb_load
	 nop

kvmap_dtlb:
	/* %g6: TAG TARGET */
	mov		TLB_TAG_ACCESS, %g4
	ldxa		[%g4] ASI_DMMU, %g4

	/* sun4v_dtlb_miss branches here with the missing virtual
	 * address already loaded into %g4
	 */
kvmap_dtlb_4v:
	brgez,pn	%g4, kvmap_dtlb_nonlinear
	 nop

#ifdef CONFIG_DEBUG_PAGEALLOC
	/* Index through the base page size TSB even for linear
	 * mappings when using page allocation debugging.
	 */
	KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
#else
	/* Correct TAG_TARGET is already in %g6, check 4mb TSB.  */
	KERN_TSB4M_LOOKUP_TL1(%g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
#endif
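	/* The TSB probe those macros expand to, roughly, as a C sketch
	 * (entry count and hash come from asm/tsb.h; the 4MB TSB hashes
	 * on a 4MB index instead of PAGE_SHIFT):
	 *
	 *	struct tsb *ent = &tsb[(vaddr >> PAGE_SHIFT) &
	 *			       (KERNEL_TSB_NENTRIES - 1)];
	 *	if (ent->tag == (vaddr >> 22))	// kernel TAG TARGET
	 *		tlb_load(ent->pte);	// hypothetical helper
	 *	else
	 *		goto miss;
	 */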
	/* TSB entry address left in %g1, lookup linear PTE.
	 * Must preserve %g1 and %g6 (TAG).
	 */
kvmap_dtlb_tsb4m_miss:
	/* Clear the PAGE_OFFSET top virtual bits, shift
	 * down to get PFN, and make sure PFN is in range.
	 */
	sllx		%g4, 21, %g5

	/* Check to see if we know about valid memory at the 4MB
	 * chunk this physical address will reside within.
	 */
	srlx		%g5, 21 + 41, %g2
	brnz,pn		%g2, kvmap_dtlb_longpath
	 nop

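	/* In C terms (a sketch; 21 is the number of PAGE_OFFSET bits
	 * being stripped, 41 the maximum physical address bits assumed
	 * by this check):
	 *
	 *	shifted = vaddr << 21;		// drop PAGE_OFFSET bits
	 *	if (shifted >> (21 + 41))	// paddr must fit in 41 bits
	 *		goto longpath;
	 */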
	/* This unconditional branch and delay-slot nop gets patched
	 * by the sethi sequence once the bitmap is properly setup.
	 */
	.globl		valid_addr_bitmap_insn
valid_addr_bitmap_insn:
	ba,pt		%xcc, 2f
	 nop
	.subsection	2
	.globl		valid_addr_bitmap_patch
valid_addr_bitmap_patch:
	sethi		%hi(sparc64_valid_addr_bitmap), %g7
	or		%g7, %lo(sparc64_valid_addr_bitmap), %g7
	.previous

	srlx		%g5, 21 + 22, %g2
	srlx		%g2, 6, %g5
	and		%g2, 63, %g2
	sllx		%g5, 3, %g5
	ldx		[%g7 + %g5], %g5
	mov		1, %g7
	sllx		%g7, %g2, %g7
	andcc		%g5, %g7, %g0
	be,pn		%xcc, kvmap_dtlb_longpath

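	/* The sequence above is essentially test_bit() by hand, one bit
	 * per 4MB chunk of physical memory (a C sketch):
	 *
	 *	idx  = paddr >> 22;		// 4MB chunk number
	 *	word = sparc64_valid_addr_bitmap[idx >> 6];
	 *	if (!(word & (1UL << (idx & 63))))
	 *		goto longpath;		// hole, not RAM
	 */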
2:	 sethi		%hi(kpte_linear_bitmap), %g2

	/* Get the 256MB physical address index. */
	sllx		%g4, 21, %g5
	or		%g2, %lo(kpte_linear_bitmap), %g2
	srlx		%g5, 21 + 28, %g5
	and		%g5, (32 - 1), %g7

	/* Divide by 32 to get the offset into the bitmask.  */
	srlx		%g5, 5, %g5
	add		%g7, %g7, %g7
	sllx		%g5, 3, %g5

	/* kern_linear_pte_xor[(mask >> shift) & 3] */
	ldx		[%g2 + %g5], %g2
	srlx		%g2, %g7, %g7
	sethi		%hi(kern_linear_pte_xor), %g5
	and		%g7, 0x3, %g7
	or		%g5, %lo(kern_linear_pte_xor), %g5
	sllx		%g7, 3, %g7
	ldx		[%g5 + %g7], %g2

	.globl		kvmap_linear_patch
kvmap_linear_patch:
	ba,pt		%xcc, kvmap_dtlb_tsb4m_load
	 xor		%g2, %g4, %g5

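	/* Putting the index math together as a C sketch (two bits per
	 * 256MB region select one of the four linear-mapping page-size
	 * xor values; the symbols are real, the helper shape is
	 * illustrative):
	 *
	 *	i   = paddr >> 28;		// 256MB region number
	 *	sel = (kpte_linear_bitmap[i >> 5]
	 *	       >> ((i & 31) * 2)) & 3;
	 *	pte = vaddr ^ kern_linear_pte_xor[sel];
	 */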
kvmap_dtlb_vmalloc_addr:
	KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath)

	TSB_LOCK_TAG(%g1, %g2, %g7)

	/* Load and check PTE.  */
	ldxa		[%g5] ASI_PHYS_USE_EC, %g5
	mov		1, %g7
	sllx		%g7, TSB_TAG_INVALID_BIT, %g7
	brgez,a,pn	%g5, kvmap_dtlb_longpath
	 TSB_STORE(%g1, %g7)

	TSB_WRITE(%g1, %g5, %g6)

	/* fallthrough to TLB load */

kvmap_dtlb_load:

661:	stxa		%g5, [%g0] ASI_DTLB_DATA_IN	! Reload TLB
	retry
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	nop
	nop
	.previous

	/* For sun4v the ASI_DTLB_DATA_IN store and the retry
	 * instruction get nop'd out and we get here to branch
	 * to the sun4v tlb load code.  The registers are setup
	 * as follows:
	 *
	 * %g4: vaddr
	 * %g5: PTE
	 * %g6: TAG
	 *
	 * The sun4v TLB load wants the PTE in %g3 so we fix that
	 * up here.
	 */
	ba,pt		%xcc, sun4v_dtlb_load
	 mov		%g5, %g3

#ifdef CONFIG_SPARSEMEM_VMEMMAP
kvmap_vmemmap:
	sub		%g4, %g5, %g5
	srlx		%g5, 22, %g5
	sethi		%hi(vmemmap_table), %g1
	sllx		%g5, 3, %g5
	or		%g1, %lo(vmemmap_table), %g1
	ba,pt		%xcc, kvmap_dtlb_load
	 ldx		[%g1 + %g5], %g5
#endif

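	/* The kvmap_vmemmap walk above, sketched in C (vmemmap_table is
	 * the real symbol; %g5 holds VMEMMAP_BASE on entry, loaded by
	 * the caller below):
	 *
	 *	pte = vmemmap_table[(vaddr - VMEMMAP_BASE) >> 22];
	 *
	 * i.e. one precomputed 4MB PTE per 4MB slot of the vmemmap.
	 */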
kvmap_dtlb_nonlinear:
	/* Catch kernel NULL pointer derefs.  */
	sethi		%hi(PAGE_SIZE), %g5
	cmp		%g4, %g5
	bleu,pn		%xcc, kvmap_dtlb_longpath
	 nop

#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/* Do not use the TSB for vmemmap.  */
	mov		(VMEMMAP_BASE >> 40), %g5
	sllx		%g5, 40, %g5
	cmp		%g4, %g5
	bgeu,pn		%xcc, kvmap_vmemmap
	 nop
#endif

	KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)

kvmap_dtlb_tsbmiss:
	sethi		%hi(MODULES_VADDR), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_dtlb_longpath
	 mov		(VMALLOC_END >> 40), %g5
	sllx		%g5, 40, %g5
	cmp		%g4, %g5
	bgeu,pn		%xcc, kvmap_dtlb_longpath
	 nop

kvmap_check_obp:
	sethi		%hi(LOW_OBP_ADDRESS), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_dtlb_vmalloc_addr
	 mov		0x1, %g5
	sllx		%g5, 32, %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_dtlb_obp
	 nop
	ba,pt		%xcc, kvmap_dtlb_vmalloc_addr
	 nop

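	/* Summarizing the whole non-linear DTLB ladder as a C sketch
	 * (control flow is illustrative; the vmemmap step only exists
	 * under CONFIG_SPARSEMEM_VMEMMAP):
	 *
	 *	if (vaddr <= PAGE_SIZE)		goto longpath;	// NULL deref
	 *	if (vaddr >= VMEMMAP_BASE)	goto vmemmap;
	 *	if (tsb_hit(vaddr))		goto load;	// hypothetical
	 *	if (vaddr < MODULES_VADDR)	goto longpath;
	 *	if (vaddr >= VMALLOC_END)	goto longpath;
	 *	if (vaddr < LOW_OBP_ADDRESS)	goto vmalloc_addr;
	 *	if (vaddr < (1UL << 32))	goto obp;
	 *	goto vmalloc_addr;
	 */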
kvmap_dtlb_longpath:

661:	rdpr	%pstate, %g5
	wrpr	%g5, PSTATE_AG | PSTATE_MG, %pstate
	.section .sun4v_2insn_patch, "ax"
	.word	661b
	SET_GL(1)
	ldxa	[%g0] ASI_SCRATCHPAD, %g5
	.previous

	rdpr	%tl, %g3
	cmp	%g3, 1

661:	mov	TLB_TAG_ACCESS, %g4
	ldxa	[%g4] ASI_DMMU, %g5
	.section .sun4v_2insn_patch, "ax"
	.word	661b
	ldx	[%g5 + HV_FAULT_D_ADDR_OFFSET], %g5
	nop
	.previous

	be,pt	%xcc, sparc64_realfault_common
	 mov	FAULT_CODE_DTLB, %g4
	ba,pt	%xcc, winfix_trampoline
	 nop