/*
 * PowerPC64 port by Mike Corrigan and Dave Engebretsen
 *   {mikejc|engebret}@us.ibm.com
 *
 *    Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com>
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *    Description:
 *      PowerPC Hashed Page Table functions
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/signal.h>

#include <asm/ppcdebug.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/types.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/machdep.h>
#include <asm/lmb.h>
#include <asm/abs_addr.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/cputable.h>
#include <asm/sections.h>

#define DBG(fmt...) udbg_printf(fmt)

/*
 * Note:  pte   --> Linux PTE
 *        HPTE  --> PowerPC Hashed Page Table Entry
 *
 * htab_initialize is called with the MMU off (of course), but
 * the kernel has been copied down to zero so it can directly
 * reference global data.  At this point it is very difficult
 * to print debug info.
 */

#ifdef CONFIG_U3_DART
extern unsigned long dart_tablebase;
#endif /* CONFIG_U3_DART */

HPTE		*htab_address;
unsigned long	htab_hash_mask;

extern unsigned long _SDR1;

#define KB (1024)
#define MB (1024*KB)

static inline void loop_forever(void)
{
	volatile unsigned long x = 1;

	/* Spin here; the volatile keeps the loop from being optimized away */
	for (; x; x |= 1)
		;
}

#ifdef CONFIG_PPC_MULTIPLATFORM
static inline void create_pte_mapping(unsigned long start, unsigned long end,
				      unsigned long mode, int large)
{
	unsigned long addr;
	unsigned int step;
	unsigned long tmp_mode;

	if (large)
		step = 16*MB;
	else
		step = 4*KB;

	for (addr = start; addr < end; addr += step) {
		unsigned long vpn, hash, hpteg;
		unsigned long vsid = get_kernel_vsid(addr);
		unsigned long va = (vsid << 28) | (addr & 0xfffffff);
		int ret;
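
		/*
		 * va is the virtual address: the VSID selects the 256MB
		 * segment and the low 28 bits are the offset within it.
		 * The VPN is that address in units of the page size.
		 */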
		if (large)
			vpn = va >> HPAGE_SHIFT;
		else
			vpn = va >> PAGE_SHIFT;

		tmp_mode = mode;

		/* Make non-kernel text non-executable */
		if (!in_kernel_text(addr))
			tmp_mode = mode | HW_NO_EXEC;

		hash = hpt_hash(vpn, large);
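
		/*
		 * The hash selects a bucket of HPTES_PER_GROUP (8) slots;
		 * hpteg is the index of the first slot in that primary
		 * group.
		 */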
		hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

#ifdef CONFIG_PPC_PSERIES
		if (systemcfg->platform & PLATFORM_LPAR)
			ret = pSeries_lpar_hpte_insert(hpteg, va,
				virt_to_abs(addr) >> PAGE_SHIFT,
				0, tmp_mode, 1, large);
		else
#endif /* CONFIG_PPC_PSERIES */
			ret = native_hpte_insert(hpteg, va,
				virt_to_abs(addr) >> PAGE_SHIFT,
				0, tmp_mode, 1, large);

		if (ret == -1) {
			ppc64_terminate_msg(0x20, "create_pte_mapping");
			loop_forever();
		}
	}
}

void __init htab_initialize(void)
{
	unsigned long table, htab_size_bytes;
	unsigned long pteg_count;
	unsigned long mode_rw;
	int i, use_largepages = 0;
	unsigned long base = 0, size = 0;
	extern unsigned long tce_alloc_start, tce_alloc_end;

	DBG(" -> htab_initialize()\n");

	/*
	 * Calculate the required size of the htab.  We want the number of
	 * PTEGs to equal one half the number of real pages.
	 */
	htab_size_bytes = 1UL << ppc64_pft_size;
	pteg_count = htab_size_bytes >> 7;
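	/*
	 * A PTEG is 128 bytes (8 HPTEs of 16 bytes each), hence the
	 * shift by 7 above.  For example, ppc64_pft_size == 24 gives a
	 * 16MB hash table holding 2^17 PTEGs.
	 */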

	/* For debug, make the HTAB 1/8 as big as it normally would be. */
	ifppcdebug(PPCDBG_HTABSIZE) {
		pteg_count >>= 3;
		htab_size_bytes = pteg_count << 7;
	}

	htab_hash_mask = pteg_count - 1;

	if (systemcfg->platform & PLATFORM_LPAR) {
		/* Using a hypervisor which owns the htab */
		htab_address = NULL;
		_SDR1 = 0;
	} else {
		/* Find storage for the HPT.  Must be contiguous in
		 * the absolute address space.
		 */
		table = lmb_alloc(htab_size_bytes, htab_size_bytes);

		DBG("Hash table allocated at %lx, size: %lx\n", table,
		    htab_size_bytes);

		if (!table) {
			ppc64_terminate_msg(0x20, "hpt space");
			loop_forever();
		}
		htab_address = abs_to_virt(table);
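
		/*
		 * The HTABSIZE field of SDR1 is log2(pteg_count) - 11:
		 * the architected minimum hash table is 2^18 bytes,
		 * i.e. 2^11 groups of 128 bytes.
		 */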
		/* htab absolute addr + encoded htabsize */
		_SDR1 = table + __ilog2(pteg_count) - 11;

		/* Initialize the HPT with no entries */
		memset((void *)table, 0, htab_size_bytes);
	}

	mode_rw = _PAGE_ACCESSED | _PAGE_COHERENT | PP_RWXX;

	/* On U3 based machines, we need to reserve the DART area and
	 * _NOT_ map it to avoid cache paradoxes as it's remapped non
	 * cacheable later on
	 */
	if (cpu_has_feature(CPU_FTR_16M_PAGE))
		use_largepages = 1;

	/* Create the bolted linear mapping in the hash table */
	for (i = 0; i < lmb.memory.cnt; i++) {
		base = lmb.memory.region[i].physbase + KERNELBASE;
		size = lmb.memory.region[i].size;

		DBG("creating mapping for region: %lx : %lx\n", base, size);

#ifdef CONFIG_U3_DART
		/* Do not map the DART space. Fortunately, it will be aligned
		 * in such a way that it will not cross two lmb regions and
		 * will fit within a single 16Mb page.
		 * The DART space is assumed to be a full 16Mb region even if
		 * we only use 2Mb of that space. We will use more of it later
		 * for AGP GART. We have to use a full 16Mb large page.
		 */
		DBG("DART base: %lx\n", dart_tablebase);

		if (dart_tablebase != 0 && dart_tablebase >= base
		    && dart_tablebase < (base + size)) {
			if (base != dart_tablebase)
				create_pte_mapping(base, dart_tablebase,
						   mode_rw, use_largepages);
			if ((base + size) > (dart_tablebase + 16*MB))
				create_pte_mapping(dart_tablebase + 16*MB,
						   base + size,
						   mode_rw, use_largepages);
			continue;
		}
#endif /* CONFIG_U3_DART */
		create_pte_mapping(base, base + size, mode_rw, use_largepages);
	}

	/*
	 * If we have a memory_limit and we've allocated TCEs then we need to
	 * explicitly map the TCE area at the top of RAM.  We also cope with
	 * the case that the TCEs start below memory_limit.
	 * tce_alloc_start/end are 16MB aligned so the mapping should work
	 * for either 4K or 16MB pages.
	 */
	if (tce_alloc_start) {
		tce_alloc_start += KERNELBASE;
		tce_alloc_end += KERNELBASE;

		if (base + size >= tce_alloc_start)
			tce_alloc_start = base + size + 1;

		create_pte_mapping(tce_alloc_start, tce_alloc_end,
				   mode_rw, use_largepages);
	}

	DBG(" <- htab_initialize()\n");
}

#endif /* CONFIG_PPC_MULTIPLATFORM */

/*
 * Called by asm hashtable.S for doing lazy icache flush
 */
unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
{
	struct page *page;

	if (!pfn_valid(pte_pfn(pte)))
		return pp;

	page = pte_page(pte);

	/* page is dirty */
	if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
		if (trap == 0x400) {
			/* Instruction access: flush the page, mark it clean */
			__flush_dcache_icache(page_address(page));
			set_bit(PG_arch_1, &page->flags);
		} else
			pp |= HW_NO_EXEC;
	}
	return pp;
}

/* Result code is:
 *  0 - handled
 *  1 - normal page fault
 * -1 - critical hash insertion error
 */
int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
{
	void *pgdir;
	unsigned long vsid;
	struct mm_struct *mm;
	pte_t *ptep;
	int ret;
	int user_region = 0;
	int local = 0;
	cpumask_t tmp;

	if ((ea & ~REGION_MASK) > EADDR_MASK)
		return 1;

	switch (REGION_ID(ea)) {
	case USER_REGION_ID:
		user_region = 1;
		mm = current->mm;
		if (!mm)
			return 1;

		vsid = get_vsid(mm->context.id, ea);
		break;
	case IO_REGION_ID:
		mm = &ioremap_mm;
		vsid = get_kernel_vsid(ea);
		break;
	case VMALLOC_REGION_ID:
		mm = &init_mm;
		vsid = get_kernel_vsid(ea);
		break;
	case KERNEL_REGION_ID:
		/*
		 * Should never get here - entire 0xC0... region is bolted.
		 * Send the problem up to do_page_fault
		 */
	default:
		/* Not a valid range.
		 * Send the problem up to do_page_fault
		 */
		return 1;
	}

	pgdir = mm->pgd;
	if (pgdir == NULL)
		return 1;

	tmp = cpumask_of_cpu(smp_processor_id());
	if (user_region && cpus_equal(mm->cpu_vm_mask, tmp))
		local = 1;

	/* Is this a huge page ? */
	if (unlikely(in_hugepage_area(mm->context, ea)))
		ret = hash_huge_page(mm, access, ea, vsid, local);
	else {
		ptep = find_linux_pte(pgdir, ea);
		if (ptep == NULL)
			return 1;
		ret = __hash_page(ea, access, vsid, ptep, trap, local);
	}

	return ret;
}

void flush_hash_page(unsigned long context, unsigned long ea, pte_t pte,
		     int local)
{
	unsigned long vsid, vpn, va, hash, secondary, slot;
	unsigned long huge = pte_huge(pte);

	if ((ea >= USER_START) && (ea <= USER_END))
		vsid = get_vsid(context, ea);
	else
		vsid = get_kernel_vsid(ea);

	va = (vsid << 28) | (ea & 0x0fffffff);
	if (huge)
		vpn = va >> HPAGE_SHIFT;
	else
		vpn = va >> PAGE_SHIFT;
	hash = hpt_hash(vpn, huge);
	secondary = (pte_val(pte) & _PAGE_SECONDARY) >> 15;
	if (secondary)
		hash = ~hash;
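
	/*
	 * _PAGE_SECONDARY records that the HPTE was inserted in the
	 * secondary bucket, indexed by the complemented hash;
	 * _PAGE_GROUP_IX gives the slot within that group of 8 HPTEs.
	 */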
	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	slot += (pte_val(pte) & _PAGE_GROUP_IX) >> 12;

	ppc_md.hpte_invalidate(slot, va, huge, local);
}

void flush_hash_range(unsigned long context, unsigned long number, int local)
{
	if (ppc_md.flush_hash_range) {
		ppc_md.flush_hash_range(context, number, local);
	} else {
		int i;
		struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);

		for (i = 0; i < number; i++)
			flush_hash_page(context, batch->addr[i], batch->pte[i],
					local);
	}
}

static inline void make_bl(unsigned int *insn_addr, void *func)
{
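	/*
	 * On ppc64 a function pointer points to a function descriptor;
	 * its first doubleword is the actual entry point, so
	 * dereference it to get the branch target.
	 */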
	unsigned long funcp = *((unsigned long *)func);
	int offset = funcp - (unsigned long)insn_addr;
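
	/*
	 * 0x48000001 is the "bl" opcode (I-form branch with LK=1); the
	 * displacement occupies the 0x03fffffc field.  The patched
	 * instruction must be flushed from the icache before use.
	 */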
	*insn_addr = (unsigned int)(0x48000001 | (offset & 0x03fffffc));
	flush_icache_range((unsigned long)insn_addr,
			   (unsigned long)insn_addr + 4);
}

/*
 * low_hash_fault is called when the low-level hash code failed to
 * insert a PTE due to a hypervisor error.
 */
void low_hash_fault(struct pt_regs *regs, unsigned long address)
{
	if (user_mode(regs)) {
		siginfo_t info;

		info.si_signo = SIGBUS;
		info.si_errno = 0;
		info.si_code = BUS_ADRERR;
		info.si_addr = (void __user *)address;
		force_sig_info(SIGBUS, &info, current);
		return;
	}
	bad_page_fault(regs, address, SIGBUS);
}

void __init htab_finish_init(void)
{
	extern unsigned int *htab_call_hpte_insert1;
	extern unsigned int *htab_call_hpte_insert2;
	extern unsigned int *htab_call_hpte_remove;
	extern unsigned int *htab_call_hpte_updatepp;
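
	/*
	 * Patch the branch-and-link sites in the hash assembly to call
	 * the platform's hpte routines directly, avoiding an indirect
	 * call through ppc_md on every hash fault.
	 */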
	make_bl(htab_call_hpte_insert1, ppc_md.hpte_insert);
	make_bl(htab_call_hpte_insert2, ppc_md.hpte_insert);
	make_bl(htab_call_hpte_remove, ppc_md.hpte_remove);
	make_bl(htab_call_hpte_updatepp, ppc_md.hpte_updatepp);
}