/*
 * Low-level SLB routines
 *
 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
 *
 * Based on earlier C version:
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 *    Copyright (c) 2001 Dave Engebretsen
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/pgtable.h>
#include <asm/firmware.h>
/* void slb_allocate_realmode(unsigned long ea);
 *
 * Create an SLB entry for the given EA (user or kernel).
 *	r3 = faulting address, r13 = PACA
 *	r9, r10, r11 are clobbered by this function
 * No other registers are examined or changed.
 */
_GLOBAL(slb_allocate_realmode)
	/*
	 * check for bad kernel/user address
	 * (ea & ~REGION_MASK) >= PGTABLE_RANGE
	 */
	rldicr.	r9,r3,4,(63 - PGTABLE_EADDR_SIZE - 4)
	srdi	r9,r3,60		/* get region */
	srdi	r10,r3,SID_SHIFT	/* get esid */
	cmpldi	cr7,r9,0xc		/* cmp PAGE_OFFSET for later use */

	/* r3 = address, r10 = esid, cr7 = <> PAGE_OFFSET */
	blt	cr7,0f			/* user or kernel? */
	/* kernel address: proto-VSID = ESID */
	/* WARNING - MAGIC: we don't use the VSID 0xfffffffff, but
	 * this code will generate the proto-VSID 0xfffffffff for the
	 * top segment.  That's ok, the scramble below will translate
	 * it to VSID 0, which is reserved as a bad VSID - one which
	 * will never have any pages in it.  */

	/* Check if hitting the linear mapping or some other kernel space
	 */
	/* Linear mapping encoding bits, the "li" instruction below will
	 * be patched by the kernel at boot
	 */
.globl slb_miss_kernel_load_linear
slb_miss_kernel_load_linear:
	li	r11,0
	/*
	 * context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
	 */
	addis	r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@ha
	addi	r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@l
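	/*
	 * Roughly, in C (illustrative sketch only; at this point r9 holds
	 * the region id, i.e. ea >> 60, from the earlier srdi):
	 *
	 *	context = (ea >> 60) + (MAX_USER_CONTEXT - 0xc + 1);
	 *
	 * so the kernel regions 0xc..0xf land on the context ids just
	 * above MAX_USER_CONTEXT.
	 */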
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/* Check virtual memmap region. To be patched at kernel boot */
.globl slb_miss_kernel_load_vmemmap
slb_miss_kernel_load_vmemmap:
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
	/* vmalloc mapping gets the encoding from the PACA as the mapping
	 * can be demoted from 64K -> 4K dynamically on some machines
	 */
	cmpldi	r11,(VMALLOC_SIZE >> 28) - 1
	lhz	r11,PACAVMALLOCSLLP(r13)
.globl slb_miss_kernel_load_io
slb_miss_kernel_load_io:
	/*
	 * context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
	 */
	addis	r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@ha
	addi	r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@l
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
0:
	/* when using slices, we extract the psize off the slice bitmaps
	 * and then we need to get the sllp encoding off the mmu_psize_defs
	 * array.
	 *
	 * XXX This is a bit inefficient especially for the normal case,
	 * so we should try to implement a fast path for the standard page
	 * size using the old sllp value so we avoid the array. We cannot
	 * really do dynamic patching unfortunately as processes might flip
	 * between 4k and 64k standard page size
	 */
#ifdef CONFIG_PPC_MM_SLICES
	/* below SLICE_LOW_TOP */
	/*
	 * r9 is get_paca()->context.high_slices_psize[index], r11 is mask_index
	 */
	srdi	r11,r10,(SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT + 1)	/* index */
	addi	r9,r11,PACAHIGHSLICEPSIZE
	lbzx	r9,r13,r9		/* r9 is hpsizes[r11] */
	/* r11 = (r10 >> (SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT)) & 0x1 */
	rldicl	r11,r10,(64 - (SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT)),63
	/*
	 * r9 is get_paca()->context.low_slices_psize, r11 is index
	 */
	ld	r9,PACALOWSLICESPSIZE(r13)
	sldi	r11,r11,2		/* index * 4 */
	/* Extract the psize and multiply to get an array offset */
	mulli	r9,r9,MMUPSIZEDEFSIZE
	/* Now get to the array and obtain the sllp
	 */
	ld	r11,mmu_psize_defs@got(r11)
	ld	r11,MMUPSIZESLLP(r11)
	ori	r11,r11,SLB_VSID_USER
#else
	/* paca context sllp already contains the SLB_VSID_USER bits */
	lhz	r11,PACACONTEXTSLLP(r13)
#endif /* CONFIG_PPC_MM_SLICES */
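	/*
	 * The #ifdef block above is, approximately, the following C
	 * (illustrative only; the helper name is not the kernel's):
	 *
	 *	psize = slice_psize_for(esid);	// 4-bit field taken from the
	 *					// low/high slice psize bitmaps
	 *	flags = mmu_psize_defs[psize].sllp | SLB_VSID_USER;
	 *
	 * whereas the non-slices case simply reuses the sllp value cached
	 * in the PACA, which already includes SLB_VSID_USER.
	 */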
	ld	r9,PACACONTEXTID(r13)
	bge	slb_finish_load_1T
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
	li	r10,0			/* BAD_VSID */
	li	r9,0			/* BAD_VSID */
	li	r11,SLB_VSID_USER	/* flags don't much matter */
#ifdef __DISABLED__

/* void slb_allocate_user(unsigned long ea);
 *
 * Create an SLB entry for the given EA (user or kernel).
 *	r3 = faulting address, r13 = PACA
 *	r9, r10, r11 are clobbered by this function
 * No other registers are examined or changed.
 *
 * It is called with translation enabled in order to be able to walk the
 * page tables. This is not currently used.
 */
_GLOBAL(slb_allocate_user)
	/* r3 = faulting address */
	srdi	r10,r3,28		/* get esid */

	crset	4*cr7+lt		/* set "user" flag for later */

	/* check if we fit in the range covered by the page tables */
	srdi.	r9,r3,PGTABLE_EADDR_SIZE
	crnot	4*cr0+eq,4*cr0+eq
	/* now we need to get to the page tables in order to get the page
	 * size encoding from the PMD. In the future, we'll be able to deal
	 * with 1T segments too by getting the encoding from the PGD instead
	 */
	rlwinm	r11,r10,8,25,28
	ldx	r9,r9,r11		/* get pgd_t */
	rlwinm	r11,r10,3,17,28
	ldx	r9,r9,r11		/* get pmd_t */
	/* build vsid flags */
	andi.	r11,r9,SLB_VSID_LLP
	ori	r11,r11,SLB_VSID_USER

	/* get context to calculate proto-VSID */
	ld	r9,PACACONTEXTID(r13)
	/* fall through slb_finish_load */

#endif /* __DISABLED__ */
/*
 * Finish loading of an SLB entry and return
 *
 * r3 = EA, r9 = context, r10 = ESID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
 */
slb_finish_load:
	rldimi	r10,r9,ESID_BITS,0
	ASM_VSID_SCRAMBLE(r10,r9,256M)
	/*
	 * bits above VSID_BITS_256M need to be ignored from r10
	 * also combine VSID and flags
	 */
	rldimi	r11,r10,SLB_VSID_SHIFT,(64 - (SLB_VSID_SHIFT + VSID_BITS_256M))
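	/*
	 * What the rldimi pair plus ASM_VSID_SCRAMBLE compute, sketched in
	 * C (simplified; the scramble is a multiply-and-fold, effectively
	 * modulo 2^VSID_BITS_256M - 1):
	 *
	 *	proto_vsid = (context << ESID_BITS) | esid;
	 *	vsid       = (proto_vsid * VSID_MULTIPLIER_256M) % VSID_MODULUS_256M;
	 *	vsid_data  = (vsid << SLB_VSID_SHIFT) | flags;
	 *
	 * The second rldimi discards any scramble bits above
	 * VSID_BITS_256M while merging the VSID with the flags in r11.
	 */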
	/* r3 = EA, r11 = VSID data */
	/*
	 * Find a slot, round robin. Previously we tried to find a
	 * free slot first but that took too long. Unfortunately we
	 * don't have any LRU information to help us choose a slot.
	 */
7:	ld	r10,PACASTABRR(r13)
	/* This gets soft patched on boot. */
.globl slb_compare_rr_to_size
slb_compare_rr_to_size:
	li	r10,SLB_NUM_BOLTED
	std	r10,PACASTABRR(r13)
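	/*
	 * Round-robin slot choice, as C pseudo-code (illustrative only;
	 * the compare at slb_compare_rr_to_size is patched at boot to use
	 * the machine's real SLB size):
	 *
	 *	entry = paca->stab_rr;
	 *	entry++;			// advance the round-robin pointer
	 *	if (entry >= mmu_slb_size)	// boot-patched comparison
	 *		entry = SLB_NUM_BOLTED;	// wrap, never evicting bolted entries
	 *	paca->stab_rr = entry;
	 */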
	rldimi	r3,r10,0,36		/* r3 = EA[0:35] | entry */
	oris	r10,r3,SLB_ESID_V@h	/* r10 = r3 | SLB_ESID_V */

	/* r10 = ESID data, r11 = VSID data */

	/*
	 * No need for an isync before or after this slbmte. The exception
	 * we enter with and the rfid we exit with are context synchronizing.
	 */
	slbmte	r11,r10
	/* we're done for kernel addresses */
	crclr	4*cr0+eq		/* set result to "success" */
	/* Update the SLB cache */
	lhz	r3,PACASLBCACHEPTR(r13)	/* offset = paca->slb_cache_ptr */
	cmpldi	r3,SLB_CACHE_ENTRIES
	/* still room in the SLB cache */
	sldi	r11,r3,2		/* r11 = offset * sizeof(u32) */
	srdi	r10,r10,28		/* get the 36 bits of the ESID */
	add	r11,r11,r13		/* r11 = (u32 *)paca + offset */
	stw	r10,PACASLBCACHE(r11)	/* paca->slb_cache[offset] = esid */
	addi	r3,r3,1			/* offset++ */
1:					/* offset >= SLB_CACHE_ENTRIES */
	li	r3,SLB_CACHE_ENTRIES+1
	sth	r3,PACASLBCACHEPTR(r13)	/* paca->slb_cache_ptr = offset */
	crclr	4*cr0+eq		/* set result to "success" */
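	/*
	 * SLB-cache bookkeeping above, as C pseudo-code (illustrative):
	 *
	 *	if (paca->slb_cache_ptr < SLB_CACHE_ENTRIES)
	 *		paca->slb_cache[paca->slb_cache_ptr++] = esid;
	 *	else
	 *		paca->slb_cache_ptr = SLB_CACHE_ENTRIES + 1;
	 *
	 * A pointer beyond SLB_CACHE_ENTRIES tells the flush path that the
	 * cache overflowed and the whole SLB must be flushed instead of
	 * just the cached entries.
	 */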
/*
 * Finish loading of a 1T SLB entry (for the kernel linear mapping) and return.
 *
 * r3 = EA, r9 = context, r10 = ESID(256MB), r11 = flags, clobbers r9
 */
slb_finish_load_1T:
	srdi	r10,r10,(SID_SHIFT_1T - SID_SHIFT)	/* get 1T ESID */
	rldimi	r10,r9,ESID_BITS_1T,0
	ASM_VSID_SCRAMBLE(r10,r9,1T)
	/*
	 * bits above VSID_BITS_1T need to be ignored from r10
	 * also combine VSID and flags
	 */
	rldimi	r11,r10,SLB_VSID_SHIFT_1T,(64 - (SLB_VSID_SHIFT_1T + VSID_BITS_1T))
	li	r10,MMU_SEGSIZE_1T
	rldimi	r11,r10,SLB_VSID_SSIZE_SHIFT,0	/* insert segment size */
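	/*
	 * Resulting VSID data word for a 1T entry, sketched in C
	 * (illustrative):
	 *
	 *	vsid_data = (vsid << SLB_VSID_SHIFT_1T)
	 *		  | (MMU_SEGSIZE_1T << SLB_VSID_SSIZE_SHIFT)
	 *		  | flags;
	 *
	 * i.e. the same layout as the 256M case plus the segment-size
	 * field selecting 1T segments.
	 */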
	/* r3 = EA, r11 = VSID data */
	clrrdi	r3,r3,SID_SHIFT_1T	/* clear out non-ESID bits */