/*
 * PowerPC64 port by Mike Corrigan and Dave Engebretsen
 *   {mikejc|engebret}@us.ibm.com
 *
 *    Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com>
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * PowerPC Hashed Page Table functions
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/signal.h>

#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/types.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/machdep.h>
#include <asm/lmb.h>
#include <asm/abs_addr.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#include <asm/eeh.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/cputable.h>
#include <asm/sections.h>

#undef DEBUG
#undef DEBUG_LOW

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
#define DBG_LOW(fmt...)
#endif

#define KB (1024)
#define MB (1024*KB)

/*
 * Note:  pte   --> Linux PTE
 *        HPTE  --> PowerPC Hashed Page Table Entry
 *
 * Execution context:
 *   htab_initialize is called with the MMU off (of course), but
 *   the kernel has been copied down to zero so it can directly
 *   reference global data.  At this point it is very difficult
 *   to print debug info.
 */

#ifdef CONFIG_U3_DART
extern unsigned long dart_tablebase;
#endif /* CONFIG_U3_DART */

static unsigned long _SDR1;
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];

hpte_t *htab_address;
unsigned long htab_size_bytes;
unsigned long htab_hash_mask;
int mmu_linear_psize = MMU_PAGE_4K;
int mmu_virtual_psize = MMU_PAGE_4K;
#ifdef CONFIG_HUGETLB_PAGE
int mmu_huge_psize = MMU_PAGE_16M;
unsigned int HPAGE_SHIFT;
#endif /* CONFIG_HUGETLB_PAGE */
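
/* Rough meaning of the mmu_psize_def fields as used below (the struct
 * definition in <asm/mmu.h> is authoritative): shift is log2 of the
 * page size, sllp the SLB L/LP encoding, penc the HPTE large-page
 * encoding, avpnm the mask of low AVPN bits that fall inside a large
 * page, and tlbiel whether tlbiel can invalidate that page size.
 */
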
/* These are definitions of page size arrays to be used when none
 * is provided by the firmware.
 */

/* Pre-POWER4 CPUs (4k pages only)
 */
struct mmu_psize_def mmu_psize_defaults_old[] = {
	[MMU_PAGE_4K] = {
		.shift	= 12,
		.sllp	= 0,
		.penc	= 0,
		.avpnm	= 0,
		.tlbiel	= 0,
	},
};

/* POWER4, GPUL, POWER5
 *
 * Support for 16Mb large pages
 */
struct mmu_psize_def mmu_psize_defaults_gp[] = {
	[MMU_PAGE_4K] = {
		.shift	= 12,
		.sllp	= 0,
		.penc	= 0,
		.avpnm	= 0,
		.tlbiel	= 1,
	},
	[MMU_PAGE_16M] = {
		.shift	= 24,
		.sllp	= SLB_VSID_L,
		.penc	= 0,
		.avpnm	= 0x1UL,
		.tlbiel	= 0,
	},
};

int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
		      unsigned long pstart, unsigned long mode, int psize)
{
	unsigned long vaddr, paddr;
	unsigned int step, shift;
	unsigned long tmp_mode;
	int ret = 0;

	shift = mmu_psize_defs[psize].shift;
	step = 1 << shift;

	for (vaddr = vstart, paddr = pstart; vaddr < vend;
	     vaddr += step, paddr += step) {
		unsigned long vpn, hash, hpteg;
		unsigned long vsid = get_kernel_vsid(vaddr);
		unsigned long va = (vsid << 28) | (vaddr & 0x0fffffff);

		vpn = va >> shift;
		tmp_mode = mode;

		/* Make non-kernel text non-executable */
		if (!in_kernel_text(vaddr))
			tmp_mode = mode | HPTE_R_N;

		hash = hpt_hash(va, shift);
		hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

		/* The crap below can be cleaned up once ppc_md.probe() can
		 * set up the hash callbacks; then we can just use the
		 * normal insert callback here.
		 */
#ifdef CONFIG_PPC_ISERIES
		if (machine_is(iseries))
			ret = iSeries_hpte_insert(hpteg, va,
						  __pa(vaddr),
						  tmp_mode,
						  HPTE_V_BOLTED,
						  psize);
		else
#endif
#ifdef CONFIG_PPC_PSERIES
		if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR))
			ret = pSeries_lpar_hpte_insert(hpteg, va,
						       virt_to_abs(paddr),
						       tmp_mode,
						       HPTE_V_BOLTED,
						       psize);
		else
#endif
#ifdef CONFIG_PPC_MULTIPLATFORM
			ret = native_hpte_insert(hpteg, va,
						 virt_to_abs(paddr),
						 tmp_mode, HPTE_V_BOLTED,
						 psize);
#endif
		if (ret < 0)
			break;
	}
	return ret < 0 ? ret : 0;
}
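
/* Worked example (illustrative numbers, not taken from any caller):
 * bolting the linear mapping with 16M pages gives shift = 24 and
 * step = 1 << 24, so each loop iteration bolts one 16MB page.  The
 * 64-bit "va" is the 256MB segment's VSID shifted into the high bits
 * plus the offset within the segment (vaddr & 0x0fffffff), and
 * (hash & htab_hash_mask) * HPTES_PER_GROUP picks the first slot of
 * the primary PTE group for that hash.
 */
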
static int __init htab_dt_scan_page_sizes(unsigned long node,
					  const char *uname, int depth,
					  void *data)
{
	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	u32 *prop;
	unsigned long size = 0;

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	prop = (u32 *)of_get_flat_dt_prop(node,
					  "ibm,segment-page-sizes", &size);
	if (prop != NULL) {
		DBG("Page sizes from device-tree:\n");
		size /= 4;
		cur_cpu_spec->cpu_features &= ~(CPU_FTR_16M_PAGE);
		while(size > 0) {
			unsigned int shift = prop[0];
			unsigned int slbenc = prop[1];
			unsigned int lpnum = prop[2];
			unsigned int lpenc = 0;
			struct mmu_psize_def *def;
			int idx = -1;

			size -= 3; prop += 3;
			while(size > 0 && lpnum) {
				if (prop[0] == shift)
					lpenc = prop[1];
				prop += 2; size -= 2;
				lpnum--;
			}
			switch(shift) {
			case 0xc:
				idx = MMU_PAGE_4K;
				break;
			case 0x10:
				idx = MMU_PAGE_64K;
				break;
			case 0x14:
				idx = MMU_PAGE_1M;
				break;
			case 0x18:
				idx = MMU_PAGE_16M;
				cur_cpu_spec->cpu_features |= CPU_FTR_16M_PAGE;
				break;
			case 0x22:
				idx = MMU_PAGE_16G;
				break;
			}
			if (idx < 0)
				continue;
			def = &mmu_psize_defs[idx];
			def->shift = shift;
			if (shift <= 23)
				def->avpnm = 0;
			else
				def->avpnm = (1 << (shift - 23)) - 1;
			def->sllp = slbenc;
			def->penc = lpenc;
			/* We don't know for sure what's up with tlbiel, so
			 * for now we only set it for 4K and 64K pages
			 */
			if (idx == MMU_PAGE_4K || idx == MMU_PAGE_64K)
				def->tlbiel = 1;
			else
				def->tlbiel = 0;

			DBG(" %d: shift=%02x, sllp=%04x, avpnm=%08x, "
			    "tlbiel=%d, penc=%d\n",
			    idx, shift, def->sllp, def->avpnm, def->tlbiel,
			    def->penc);
		}
		return 1;
	}
	return 0;
}
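
/* Layout of "ibm,segment-page-sizes" as the parser above consumes it:
 * a list of cell triples { shift, slb-encoding, nr-subpage-encodings }
 * each followed by nr-subpage-encodings pairs of { shift, hpte-penc }.
 * For example, a 16M entry is shift 0x18 with its SLB L-bit encoding
 * and a single { 0x18, penc } pair.
 */
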
static void __init htab_init_page_sizes(void)
{
	int rc;

	/* Default to 4K pages only */
	memcpy(mmu_psize_defs, mmu_psize_defaults_old,
	       sizeof(mmu_psize_defaults_old));

	/*
	 * Try to find the available page sizes in the device-tree
	 */
	rc = of_scan_flat_dt(htab_dt_scan_page_sizes, NULL);
	if (rc != 0)  /* Found */
		goto found;

	/*
	 * Not in the device-tree; let's fall back on the known size
	 * list for 16M capable GP & GR
	 */
	if (cpu_has_feature(CPU_FTR_16M_PAGE) && !machine_is(iseries))
		memcpy(mmu_psize_defs, mmu_psize_defaults_gp,
		       sizeof(mmu_psize_defaults_gp));
 found:
	/*
	 * Pick a size for the linear mapping. Currently, we only support
	 * 16M, 1M and 4K, which is the default
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift)
		mmu_linear_psize = MMU_PAGE_16M;
	else if (mmu_psize_defs[MMU_PAGE_1M].shift)
		mmu_linear_psize = MMU_PAGE_1M;

	/*
	 * Pick a size for the ordinary pages. Default is 4K; we support
	 * 64K if cache-inhibited large pages are supported by the
	 * processor
	 */
#ifdef CONFIG_PPC_64K_PAGES
	if (mmu_psize_defs[MMU_PAGE_64K].shift &&
	    cpu_has_feature(CPU_FTR_CI_LARGE_PAGE))
		mmu_virtual_psize = MMU_PAGE_64K;
#endif

	printk(KERN_INFO "Page orders: linear mapping = %d, others = %d\n",
	       mmu_psize_defs[mmu_linear_psize].shift,
	       mmu_psize_defs[mmu_virtual_psize].shift);

#ifdef CONFIG_HUGETLB_PAGE
	/* Init large page size. Currently, we pick 16M or 1M depending
	 * on what is available
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift)
		mmu_huge_psize = MMU_PAGE_16M;
	/* With 4k/4-level pagetables, we can't (for now) cope with a
	 * huge page size < PMD_SIZE */
	else if (mmu_psize_defs[MMU_PAGE_1M].shift)
		mmu_huge_psize = MMU_PAGE_1M;

	/* Calculate HPAGE_SHIFT and sanity check it */
	if (mmu_psize_defs[mmu_huge_psize].shift > MIN_HUGEPTE_SHIFT &&
	    mmu_psize_defs[mmu_huge_psize].shift < SID_SHIFT)
		HPAGE_SHIFT = mmu_psize_defs[mmu_huge_psize].shift;
	else
		HPAGE_SHIFT = 0; /* No huge pages dude ! */
#endif /* CONFIG_HUGETLB_PAGE */
}
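
/* Net effect on a typical POWER4/POWER5 box: the linear mapping uses
 * 16M pages when the 16M size is known, ordinary pages stay at 4K
 * unless the kernel is built with CONFIG_PPC_64K_PAGES and the CPU
 * supports cache-inhibited large pages, and the hugepage size follows
 * the same preference order (16M, else 1M).
 */
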
static int __init htab_dt_scan_pftsize(unsigned long node,
				       const char *uname, int depth,
				       void *data)
{
	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	u32 *prop;

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	prop = (u32 *)of_get_flat_dt_prop(node, "ibm,pft-size", NULL);
	if (prop != NULL) {
		/* pft_size[0] is the NUMA CEC cookie */
		ppc64_pft_size = prop[1];
		return 1;
	}
	return 0;
}

static unsigned long __init htab_get_table_size(void)
{
	unsigned long mem_size, rnd_mem_size, pteg_count;

	/* If the hash size isn't already provided by the platform, we try to
	 * retrieve it from the device-tree. If it's not there either, we
	 * calculate it now based on the total RAM size
	 */
	if (ppc64_pft_size == 0)
		of_scan_flat_dt(htab_dt_scan_pftsize, NULL);
	if (ppc64_pft_size)
		return 1UL << ppc64_pft_size;

	/* round mem_size up to next power of 2 */
	mem_size = lmb_phys_mem_size();
	rnd_mem_size = 1UL << __ilog2(mem_size);
	if (rnd_mem_size < mem_size)
		rnd_mem_size <<= 1;

	/* # pages / 2 */
	pteg_count = max(rnd_mem_size >> (12 + 1), 1UL << 11);

	return pteg_count << 7;
}
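
/* Sizing example: with 1GB of RAM, rnd_mem_size = 2^30, so pteg_count
 * = max(2^30 >> 13, 2^11) = 2^17 PTEGs.  At 128 bytes per PTEG (8
 * HPTEs of 16 bytes each), pteg_count << 7 yields a 16MB hash table,
 * i.e. one PTEG for every two 4K pages of RAM.
 */
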
#ifdef CONFIG_MEMORY_HOTPLUG
void create_section_mapping(unsigned long start, unsigned long end)
{
	BUG_ON(htab_bolt_mapping(start, end, __pa(start),
				 _PAGE_ACCESSED | _PAGE_DIRTY |
				 _PAGE_COHERENT | PP_RWXX,
				 mmu_linear_psize));
}
#endif /* CONFIG_MEMORY_HOTPLUG */

void __init htab_initialize(void)
{
	unsigned long table;
	unsigned long pteg_count;
	unsigned long mode_rw;
	unsigned long base = 0, size = 0;
	int i;

	extern unsigned long tce_alloc_start, tce_alloc_end;

	DBG(" -> htab_initialize()\n");

	/* Initialize page sizes */
	htab_init_page_sizes();

	/*
	 * Calculate the required size of the htab.  We want the number of
	 * PTEGs to equal one half the number of real pages.
	 */
	htab_size_bytes = htab_get_table_size();
	pteg_count = htab_size_bytes >> 7;

	htab_hash_mask = pteg_count - 1;

	if (firmware_has_feature(FW_FEATURE_LPAR)) {
		/* Using a hypervisor which owns the htab */
		htab_address = NULL;
		_SDR1 = 0;
	} else {
		/* Find storage for the HPT.  Must be contiguous in
		 * the absolute address space.
		 */
		table = lmb_alloc(htab_size_bytes, htab_size_bytes);

		DBG("Hash table allocated at %lx, size: %lx\n", table,
		    htab_size_bytes);

		htab_address = abs_to_virt(table);

		/* htab absolute addr + encoded htabsize */
		_SDR1 = table + __ilog2(pteg_count) - 11;
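		/* SDR1 packs the table's physical base (HTABORG; the
		 * table is naturally aligned, so its low bits are clear)
		 * together with HTABSIZE = log2(pteg_count) - 11 in the
		 * low bits; the minimum architected table is 2^11 PTEGs
		 * = 256KB, hence the -11.
		 */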

		/* Initialize the HPT with no entries */
		memset((void *)table, 0, htab_size_bytes);

		/* Set SDR1 */
		mtspr(SPRN_SDR1, _SDR1);
	}

	mode_rw = _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX;

	/* On U3 based machines, we need to reserve the DART area and
	 * _NOT_ map it to avoid cache paradoxes as it's remapped
	 * non-cacheable later on
	 */

	/* create the bolted linear mapping in the hash table */
	for (i = 0; i < lmb.memory.cnt; i++) {
		base = (unsigned long)__va(lmb.memory.region[i].base);
		size = lmb.memory.region[i].size;

		DBG("creating mapping for region: %lx : %lx\n", base, size);

#ifdef CONFIG_U3_DART
		/* Do not map the DART space. Fortunately, it will be aligned
		 * in such a way that it will not cross two lmb regions and
		 * will fit within a single 16Mb page.
		 * The DART space is assumed to be a full 16Mb region even if
		 * we only use 2Mb of that space. We will use more of it later
		 * for AGP GART. We have to use a full 16Mb large page.
		 */
		DBG("DART base: %lx\n", dart_tablebase);

		if (dart_tablebase != 0 && dart_tablebase >= base
		    && dart_tablebase < (base + size)) {
			unsigned long dart_table_end = dart_tablebase + 16 * MB;
			if (base != dart_tablebase)
				BUG_ON(htab_bolt_mapping(base, dart_tablebase,
							 __pa(base), mode_rw,
							 mmu_linear_psize));
			if ((base + size) > dart_table_end)
				BUG_ON(htab_bolt_mapping(dart_tablebase+16*MB,
							 base + size,
							 __pa(dart_table_end),
							 mode_rw,
							 mmu_linear_psize));
			continue;
		}
#endif /* CONFIG_U3_DART */
		BUG_ON(htab_bolt_mapping(base, base + size, __pa(base),
					 mode_rw, mmu_linear_psize));
	}

	/*
	 * If we have a memory_limit and we've allocated TCEs then we need to
	 * explicitly map the TCE area at the top of RAM. We also cope with the
	 * case that the TCEs start below memory_limit.
	 * tce_alloc_start/end are 16MB aligned so the mapping should work
	 * for either 4K or 16MB pages.
	 */
	if (tce_alloc_start) {
		tce_alloc_start = (unsigned long)__va(tce_alloc_start);
		tce_alloc_end = (unsigned long)__va(tce_alloc_end);

		if (base + size >= tce_alloc_start)
			tce_alloc_start = base + size + 1;

		BUG_ON(htab_bolt_mapping(tce_alloc_start, tce_alloc_end,
					 __pa(tce_alloc_start), mode_rw,
					 mmu_linear_psize));
	}

	DBG(" <- htab_initialize()\n");
}

void htab_initialize_secondary(void)
{
	if (!firmware_has_feature(FW_FEATURE_LPAR))
		mtspr(SPRN_SDR1, _SDR1);
}

/*
 * Called by asm hashtable.S for doing lazy icache flush
 */
unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
{
	struct page *page;

	if (!pfn_valid(pte_pfn(pte)))
		return pp;

	page = pte_page(pte);

	/* page is dirty */
	if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
		if (trap == 0x400) {
			__flush_dcache_icache(page_address(page));
			set_bit(PG_arch_1, &page->flags);
		} else
			pp |= HPTE_R_N;
	}
	return pp;
}
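
/* PG_arch_1 serves here as an "icache is clean" flag: an instruction
 * fault (trap 0x400) on a page not yet marked clean triggers a
 * dcache/icache flush and sets the bit, while other faults instead
 * map the page no-execute (HPTE_R_N) so a later ifetch will fault
 * and flush.
 */
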
/* Result code is:
 *  0 - handled
 *  1 - normal page fault
 * -1 - critical hash insertion error
 */
int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
{
	void *pgdir;
	unsigned long vsid;
	struct mm_struct *mm;
	pte_t *ptep;
	cpumask_t tmp;
	int rc, user_region = 0, local = 0;

	DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx)\n",
		ea, access, trap);

	if ((ea & ~REGION_MASK) >= PGTABLE_RANGE) {
		DBG_LOW(" out of pgtable range !\n");
		return 1;
	}

	/* Get region & vsid */
	switch (REGION_ID(ea)) {
	case USER_REGION_ID:
		user_region = 1;
		mm = current->mm;
		if (!mm) {
			DBG_LOW(" user region with no mm !\n");
			return 1;
		}
		vsid = get_vsid(mm->context.id, ea);
		break;
	case VMALLOC_REGION_ID:
		mm = &init_mm;
		vsid = get_kernel_vsid(ea);
		break;
	default:
		/* Not a valid range.
		 * Send the problem up to do_page_fault
		 */
		return 1;
	}
	DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid);

	/* Get pgdir */
	pgdir = mm->pgd;
	if (pgdir == NULL)
		return 1;

	/* Check CPU locality */
	tmp = cpumask_of_cpu(smp_processor_id());
	if (user_region && cpus_equal(mm->cpu_vm_mask, tmp))
		local = 1;

	/* Handle hugepage regions */
	if (unlikely(in_hugepage_area(mm->context, ea))) {
		DBG_LOW(" -> huge page !\n");
		return hash_huge_page(mm, access, ea, vsid, local, trap);
	}

	/* Get PTE and page size from page tables */
	ptep = find_linux_pte(pgdir, ea);
	if (ptep == NULL || !pte_present(*ptep)) {
		DBG_LOW(" no PTE !\n");
		return 1;
	}

#ifndef CONFIG_PPC_64K_PAGES
	DBG_LOW(" i-pte: %016lx\n", pte_val(*ptep));
#else
	DBG_LOW(" i-pte: %016lx %016lx\n", pte_val(*ptep),
		pte_val(*(ptep + PTRS_PER_PTE)));
#endif
	/* Pre-check access permissions (will be re-checked atomically
	 * in __hash_page_XX but this pre-check is a fast path)
	 */
	if (access & ~pte_val(*ptep)) {
		DBG_LOW(" no access !\n");
		return 1;
	}

	/* Do the actual hashing */
#ifndef CONFIG_PPC_64K_PAGES
	rc = __hash_page_4K(ea, access, vsid, ptep, trap, local);
#else
	if (mmu_virtual_psize == MMU_PAGE_64K)
		rc = __hash_page_64K(ea, access, vsid, ptep, trap, local);
	else
		rc = __hash_page_4K(ea, access, vsid, ptep, trap, local);
#endif /* CONFIG_PPC_64K_PAGES */

#ifndef CONFIG_PPC_64K_PAGES
	DBG_LOW(" o-pte: %016lx\n", pte_val(*ptep));
#else
	DBG_LOW(" o-pte: %016lx %016lx\n", pte_val(*ptep),
		pte_val(*(ptep + PTRS_PER_PTE)));
#endif
	DBG_LOW(" -> rc=%d\n", rc);
	return rc;
}
EXPORT_SYMBOL_GPL(hash_page);

void hash_preload(struct mm_struct *mm, unsigned long ea,
		  unsigned long access, unsigned long trap)
{
	unsigned long vsid;
	void *pgdir;
	pte_t *ptep;
	cpumask_t mask;
	unsigned long flags;
	int local = 0;

	/* We don't want huge pages prefaulted for now
	 */
	if (unlikely(in_hugepage_area(mm->context, ea)))
		return;

	DBG_LOW("hash_preload(mm=%p, mm->pgdir=%p, ea=%016lx, access=%lx,"
		" trap=%lx)\n", mm, mm->pgd, ea, access, trap);

	/* Get PTE, VSID, access mask */
	pgdir = mm->pgd;
	if (pgdir == NULL)
		return;
	ptep = find_linux_pte(pgdir, ea);
	if (!ptep)
		return;
	vsid = get_vsid(mm->context.id, ea);

	/* Hash it in */
	local_irq_save(flags);
	mask = cpumask_of_cpu(smp_processor_id());
	if (cpus_equal(mm->cpu_vm_mask, mask))
		local = 1;
#ifndef CONFIG_PPC_64K_PAGES
	__hash_page_4K(ea, access, vsid, ptep, trap, local);
#else
	if (mmu_virtual_psize == MMU_PAGE_64K)
		__hash_page_64K(ea, access, vsid, ptep, trap, local);
	else
		__hash_page_4K(ea, access, vsid, ptep, trap, local);
#endif /* CONFIG_PPC_64K_PAGES */
	local_irq_restore(flags);
}

void flush_hash_page(unsigned long va, real_pte_t pte, int psize, int local)
{
	unsigned long hash, index, shift, hidx, slot;

	DBG_LOW("flush_hash_page(va=%016lx)\n", va);
	pte_iterate_hashed_subpages(pte, psize, va, index, shift) {
		hash = hpt_hash(va, shift);
		hidx = __rpte_to_hidx(pte, index);
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;
		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;
		DBG_LOW(" sub %ld: hash=%lx, hidx=%lx\n", index, slot, hidx);
		ppc_md.hpte_invalidate(slot, va, psize, local);
	} pte_iterate_hashed_end();
}
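
/* The hidx recorded in the Linux PTE remembers which of the 8 slots
 * in a PTE group holds the HPTE and whether it landed in the secondary
 * group; inverting the hash (hash = ~hash) regenerates the secondary
 * group's index, so the entry can be invalidated without searching
 * both groups.
 */
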
void flush_hash_range(unsigned long number, int local)
{
	if (ppc_md.flush_hash_range)
		ppc_md.flush_hash_range(number, local);
	else {
		int i;
		struct ppc64_tlb_batch *batch =
			&__get_cpu_var(ppc64_tlb_batch);

		for (i = 0; i < number; i++)
			flush_hash_page(batch->vaddr[i], batch->pte[i],
					batch->psize, local);
	}
}

static inline void make_bl(unsigned int *insn_addr, void *func)
{
	unsigned long funcp = *((unsigned long *)func);
	int offset = funcp - (unsigned long)insn_addr;

	*insn_addr = (unsigned int)(0x48000001 | (offset & 0x03fffffc));
	flush_icache_range((unsigned long)insn_addr,
			   (unsigned long)insn_addr + 4);
}
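
/* 0x48000001 is the I-form PowerPC branch: primary opcode 18 in the
 * top six bits with the LK bit set, i.e. "bl".  The 24-bit LI field
 * (offset & 0x03fffffc, word-aligned) limits the reach to +/-32MB,
 * and func is dereferenced first because a PPC64 function pointer
 * points at a descriptor whose first word is the actual entry address.
 */
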
/*
 * low_hash_fault is called when the low level hash code failed
 * to insert a PTE due to a hypervisor error
 */
void low_hash_fault(struct pt_regs *regs, unsigned long address)
{
	if (user_mode(regs)) {
		siginfo_t info;

		info.si_signo = SIGBUS;
		info.si_errno = 0;
		info.si_code = BUS_ADRERR;
		info.si_addr = (void __user *)address;
		force_sig_info(SIGBUS, &info, current);
		return;
	}
	bad_page_fault(regs, address, SIGBUS);
}
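
/* htab_finish_init() runs once ppc_md is populated: make_bl() patches
 * the branch-and-link call sites embedded in the low-level assembly
 * hash path so the fault-time fast path branches straight to the
 * machine-specific HPTE callbacks instead of loading them from ppc_md
 * on every hash fault.
 */
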
void __init htab_finish_init(void)
{
	extern unsigned int *htab_call_hpte_insert1;
	extern unsigned int *htab_call_hpte_insert2;
	extern unsigned int *htab_call_hpte_remove;
	extern unsigned int *htab_call_hpte_updatepp;

#ifdef CONFIG_PPC_64K_PAGES
	extern unsigned int *ht64_call_hpte_insert1;
	extern unsigned int *ht64_call_hpte_insert2;
	extern unsigned int *ht64_call_hpte_remove;
	extern unsigned int *ht64_call_hpte_updatepp;

	make_bl(ht64_call_hpte_insert1, ppc_md.hpte_insert);
	make_bl(ht64_call_hpte_insert2, ppc_md.hpte_insert);
	make_bl(ht64_call_hpte_remove, ppc_md.hpte_remove);
	make_bl(ht64_call_hpte_updatepp, ppc_md.hpte_updatepp);
#endif /* CONFIG_PPC_64K_PAGES */

	make_bl(htab_call_hpte_insert1, ppc_md.hpte_insert);
	make_bl(htab_call_hpte_insert2, ppc_md.hpte_insert);
	make_bl(htab_call_hpte_remove, ppc_md.hpte_remove);
	make_bl(htab_call_hpte_updatepp, ppc_md.hpte_updatepp);
}