/*
 * PowerPC64 port by Mike Corrigan and Dave Engebretsen
 *   {mikejc|engebret}@us.ibm.com
 *
 *    Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com>
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *    PowerPC Hashed Page Table functions
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/sysctl.h>
#include <linux/export.h>
#include <linux/ctype.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/memblock.h>
#include <linux/context_tracking.h>
#include <linux/libfdt.h>

#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/types.h>
#include <asm/uaccess.h>
#include <asm/machdep.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/copro.h>
#include <asm/code-patching.h>
#include <asm/fadump.h>
#include <asm/firmware.h>
#include <asm/trace.h>
#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
#define DBG_LOW(fmt...)
#endif
/*
 * Note:  pte   --> Linux PTE
 *        HPTE  --> PowerPC Hashed Page Table Entry
 *
 * Note: htab_initialize is called with the MMU off (of course), but
 * the kernel has been copied down to zero so it can directly
 * reference global data. At this point it is very difficult
 * to print debug info.
 */
static unsigned long _SDR1;
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
EXPORT_SYMBOL_GPL(mmu_psize_defs);

struct hash_pte *htab_address;
unsigned long htab_size_bytes;
unsigned long htab_hash_mask;
EXPORT_SYMBOL_GPL(htab_hash_mask);
int mmu_linear_psize = MMU_PAGE_4K;
EXPORT_SYMBOL_GPL(mmu_linear_psize);
int mmu_virtual_psize = MMU_PAGE_4K;
int mmu_vmalloc_psize = MMU_PAGE_4K;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
int mmu_vmemmap_psize = MMU_PAGE_4K;
#endif
int mmu_io_psize = MMU_PAGE_4K;
int mmu_kernel_ssize = MMU_SEGSIZE_256M;
EXPORT_SYMBOL_GPL(mmu_kernel_ssize);
int mmu_highuser_ssize = MMU_SEGSIZE_256M;
u16 mmu_slb_size = 64;
EXPORT_SYMBOL_GPL(mmu_slb_size);
#ifdef CONFIG_PPC_64K_PAGES
int mmu_ci_restrictions;
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
static u8 *linear_map_hash_slots;
static unsigned long linear_map_hash_count;
static DEFINE_SPINLOCK(linear_map_hash_lock);
#endif /* CONFIG_DEBUG_PAGEALLOC */
/* There are definitions of page sizes arrays to be used when none
 * is provided by the firmware.
 */

/* Pre-POWER4 CPUs (4k pages only)
 */
static struct mmu_psize_def mmu_psize_defaults_old[] = {
	[MMU_PAGE_4K] = {
		.penc   = {[MMU_PAGE_4K] = 0, [1 ... MMU_PAGE_COUNT - 1] = -1},
	},
};

/* POWER4, GPUL, POWER5
 *
 * Support for 16Mb large pages
 */
static struct mmu_psize_def mmu_psize_defaults_gp[] = {
	[MMU_PAGE_4K] = {
		.penc   = {[MMU_PAGE_4K] = 0, [1 ... MMU_PAGE_COUNT - 1] = -1},
	},
	[MMU_PAGE_16M] = {
		.penc   = {[0 ... MMU_PAGE_16M - 1] = -1, [MMU_PAGE_16M] = 0,
			   [MMU_PAGE_16M + 1 ... MMU_PAGE_COUNT - 1] = -1 },
	},
};
/*
 * 'R' and 'C' update notes:
 *  - Under pHyp or KVM, the updatepp path will not set C, thus it *will*
 *    create writeable HPTEs without C set, because the hcall H_PROTECT
 *    that we use in that case will not update C
 *  - The above is however not a problem, because we also don't do that
 *    fancy "no flush" variant of eviction and we use H_REMOVE which will
 *    do the right thing and thus we don't have the race I described earlier
 *
 *  - Under bare metal, we do have the race, so we need R and C set
 *  - We make sure R is always set and never lost
 *  - C is _PAGE_DIRTY, and *should* always be set for a writeable mapping
 */
unsigned long htab_convert_pte_flags(unsigned long pteflags)
{
	unsigned long rflags = 0;

	/* _PAGE_EXEC -> NOEXEC */
	if ((pteflags & _PAGE_EXEC) == 0)
		rflags |= HPTE_R_N;
	/*
	 * Linux uses slb key 0 for kernel and 1 for user.
	 * kernel RW areas are mapped with PPP=0b000
	 * User area is mapped with PPP=0b010 for read/write
	 * or PPP=0b011 for read-only (including writeable but clean pages).
	 */
	if (pteflags & _PAGE_PRIVILEGED) {
		/*
		 * Kernel read only mapped with ppp bits 0b110
		 */
		if (!(pteflags & _PAGE_WRITE))
			rflags |= (HPTE_R_PP0 | 0x2);
	} else {
		if (pteflags & _PAGE_RWX)
			rflags |= 0x2;
		if (!((pteflags & _PAGE_WRITE) && (pteflags & _PAGE_DIRTY)))
			rflags |= 0x1;
	}
	/*
	 * We can't allow hardware to update hpte bits. Hence always
	 * set 'R' bit and set 'C' if it is a write fault
	 */
	rflags |= HPTE_R_R;

	if (pteflags & _PAGE_DIRTY)
		rflags |= HPTE_R_C;

	if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_TOLERANT)
		rflags |= HPTE_R_I;
	else if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_NON_IDEMPOTENT)
		rflags |= (HPTE_R_I | HPTE_R_G);
	else if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_SAO)
		rflags |= (HPTE_R_W | HPTE_R_I | HPTE_R_M);
	else
		/*
		 * Add memory coherence if cache inhibited is not set
		 */
		rflags |= HPTE_R_M;

	return rflags;
}
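/*
 * Illustrative sketch (not part of the original file): how a caller can use
 * htab_convert_pte_flags() for a bolted kernel page, as htab_initialize()
 * does further below. The helper name example_kernel_rflags() is hypothetical
 * and the block is compiled out; it only shows that a privileged, read/write,
 * dirty, cacheable PTE ends up with the R, C and M bits set and PP = 0b000
 * (kernel read/write).
 */
#if 0
static unsigned long example_kernel_rflags(void)
{
	/* PAGE_KERNEL is privileged, readable, writeable and dirty */
	return htab_convert_pte_flags(pgprot_val(PAGE_KERNEL));
}
#endif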
int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
		      unsigned long pstart, unsigned long prot,
		      int psize, int ssize)
{
	unsigned long vaddr, paddr;
	unsigned int step, shift;
	int ret = 0;

	shift = mmu_psize_defs[psize].shift;
	step = 1 << shift;

	prot = htab_convert_pte_flags(prot);

	DBG("htab_bolt_mapping(%lx..%lx -> %lx (%lx,%d,%d)\n",
	    vstart, vend, pstart, prot, psize, ssize);

	for (vaddr = vstart, paddr = pstart; vaddr < vend;
	     vaddr += step, paddr += step) {
		unsigned long hash, hpteg;
		unsigned long vsid = get_kernel_vsid(vaddr, ssize);
		unsigned long vpn  = hpt_vpn(vaddr, vsid, ssize);
		unsigned long tprot = prot;

		/*
		 * If we hit a bad address return error.
		 */
		if (!vsid)
			return -1;

		/* Make kernel text executable */
		if (overlaps_kernel_text(vaddr, vaddr + step))
			tprot &= ~HPTE_R_N;

		/* Make kvm guest trampolines executable */
		if (overlaps_kvm_tmp(vaddr, vaddr + step))
			tprot &= ~HPTE_R_N;

		/*
		 * If relocatable, check if it overlaps interrupt vectors that
		 * are copied down to real 0. For relocatable kernel
		 * (e.g. kdump case) we copy interrupt vectors down to real
		 * address 0. Mark that region as executable. This is
		 * because on p8 system with relocation on exception feature
		 * enabled, exceptions are raised with MMU (IR=DR=1) ON. Hence
		 * in order to execute the interrupt handlers in virtual
		 * mode the vector region needs to be marked as executable.
		 */
		if ((PHYSICAL_START > MEMORY_START) &&
		    overlaps_interrupt_vector_text(vaddr, vaddr + step))
			tprot &= ~HPTE_R_N;

		hash = hpt_hash(vpn, shift, ssize);
		hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

		BUG_ON(!ppc_md.hpte_insert);
		ret = ppc_md.hpte_insert(hpteg, vpn, paddr, tprot,
					 HPTE_V_BOLTED, psize, psize, ssize);

		if (ret < 0)
			break;

#ifdef CONFIG_DEBUG_PAGEALLOC
		if (debug_pagealloc_enabled() &&
		    (paddr >> PAGE_SHIFT) < linear_map_hash_count)
			linear_map_hash_slots[paddr >> PAGE_SHIFT] = ret | 0x80;
#endif /* CONFIG_DEBUG_PAGEALLOC */
	}
	return ret < 0 ? ret : 0;
}
int htab_remove_mapping(unsigned long vstart, unsigned long vend,
			int psize, int ssize)
{
	unsigned long vaddr;
	unsigned int step, shift;
	int rc;
	int ret = 0;

	shift = mmu_psize_defs[psize].shift;
	step = 1 << shift;

	if (!ppc_md.hpte_removebolted)
		return -ENODEV;

	for (vaddr = vstart; vaddr < vend; vaddr += step) {
		rc = ppc_md.hpte_removebolted(vaddr, psize, ssize);
		if (rc == -ENOENT) {
			ret = -ENOENT;
			continue;
		}
		if (rc < 0)
			return rc;
	}

	return ret;
}
static bool disable_1tb_segments = false;

static int __init parse_disable_1tb_segments(char *p)
{
	disable_1tb_segments = true;
	return 0;
}
early_param("disable_1tb_segments", parse_disable_1tb_segments);
static int __init htab_dt_scan_seg_sizes(unsigned long node,
					 const char *uname, int depth,
					 void *data)
{
	const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	const __be32 *prop;
	int size = 0;

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	prop = of_get_flat_dt_prop(node, "ibm,processor-segment-sizes", &size);
	if (prop == NULL)
		return 0;
	for (; size >= 4; size -= 4, ++prop) {
		if (be32_to_cpu(prop[0]) == 40) {
			DBG("1T segment support detected\n");

			if (disable_1tb_segments) {
				DBG("1T segments disabled by command line\n");
				break;
			}

			cur_cpu_spec->mmu_features |= MMU_FTR_1T_SEGMENT;
			return 1;
		}
	}
	cur_cpu_spec->mmu_features &= ~MMU_FTR_NO_SLBIE_B;
	return 0;
}

static void __init htab_init_seg_sizes(void)
{
	of_scan_flat_dt(htab_dt_scan_seg_sizes, NULL);
}
static int __init get_idx_from_shift(unsigned int shift)
static int __init htab_dt_scan_page_sizes(unsigned long node,
					  const char *uname, int depth,
					  void *data)
{
	const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	const __be32 *prop;
	int size = 0;

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	prop = of_get_flat_dt_prop(node, "ibm,segment-page-sizes", &size);
	if (!prop)
		return 0;

	pr_info("Page sizes from device-tree:\n");
	size /= 4;
	cur_cpu_spec->mmu_features &= ~(MMU_FTR_16M_PAGE);
	while (size > 0) {
		unsigned int base_shift = be32_to_cpu(prop[0]);
		unsigned int slbenc = be32_to_cpu(prop[1]);
		unsigned int lpnum = be32_to_cpu(prop[2]);
		struct mmu_psize_def *def;
		int idx, base_idx;

		size -= 3; prop += 3;
		base_idx = get_idx_from_shift(base_shift);
		if (base_idx < 0) {
			/* skip the pte encoding also */
			prop += lpnum * 2; size -= lpnum * 2;
			continue;
		}
		def = &mmu_psize_defs[base_idx];
		if (base_idx == MMU_PAGE_16M)
			cur_cpu_spec->mmu_features |= MMU_FTR_16M_PAGE;

		def->shift = base_shift;
		if (base_shift <= 23)
			def->avpnm = 0;
		else
			def->avpnm = (1 << (base_shift - 23)) - 1;
		def->sllp = slbenc;
		/*
		 * We don't know for sure what's up with tlbiel, so
		 * for now we only set it for 4K and 64K pages
		 */
		if (base_idx == MMU_PAGE_4K || base_idx == MMU_PAGE_64K)
			def->tlbiel = 1;
		else
			def->tlbiel = 0;

		while (size > 0 && lpnum) {
			unsigned int shift = be32_to_cpu(prop[0]);
			int penc  = be32_to_cpu(prop[1]);

			prop += 2; size -= 2;
			lpnum--;

			idx = get_idx_from_shift(shift);
			if (idx < 0)
				continue;

			if (penc == -1)
				pr_err("Invalid penc for base_shift=%d "
				       "shift=%d\n", base_shift, shift);

			def->penc[idx] = penc;
			pr_info("base_shift=%d: shift=%d, sllp=0x%04lx,"
				" avpnm=0x%08lx, tlbiel=%d, penc=%d\n",
				base_shift, shift, def->sllp,
				def->avpnm, def->tlbiel, def->penc[idx]);
		}
	}

	return 1;
}
#ifdef CONFIG_HUGETLB_PAGE
/* Scan for 16G memory blocks that have been set aside for huge pages
 * and reserve those blocks for 16G huge pages.
 */
static int __init htab_dt_scan_hugepage_blocks(unsigned long node,
					const char *uname, int depth,
					void *data)
{
	const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	const __be64 *addr_prop;
	const __be32 *page_count_prop;
	unsigned int expected_pages;
	unsigned long phys_addr;
	unsigned long block_size;

	/* We are scanning "memory" nodes only */
	if (type == NULL || strcmp(type, "memory") != 0)
		return 0;

	/* This property is the log base 2 of the number of virtual pages that
	 * will represent this memory block. */
	page_count_prop = of_get_flat_dt_prop(node, "ibm,expected#pages", NULL);
	if (page_count_prop == NULL)
		return 0;
	expected_pages = (1 << be32_to_cpu(page_count_prop[0]));
	addr_prop = of_get_flat_dt_prop(node, "reg", NULL);
	if (addr_prop == NULL)
		return 0;
	phys_addr = be64_to_cpu(addr_prop[0]);
	block_size = be64_to_cpu(addr_prop[1]);
	if (block_size != (16 * GB))
		return 0;
	printk(KERN_INFO "Huge page(16GB) memory: "
	       "addr = 0x%lX size = 0x%lX pages = %d\n",
	       phys_addr, block_size, expected_pages);
	if (phys_addr + (16 * GB) <= memblock_end_of_DRAM()) {
		memblock_reserve(phys_addr, block_size * expected_pages);
		add_gpage(phys_addr, block_size, expected_pages);
	}
	return 0;
}
#endif /* CONFIG_HUGETLB_PAGE */
static void mmu_psize_set_default_penc(void)
{
	int bpsize, apsize;

	for (bpsize = 0; bpsize < MMU_PAGE_COUNT; bpsize++)
		for (apsize = 0; apsize < MMU_PAGE_COUNT; apsize++)
			mmu_psize_defs[bpsize].penc[apsize] = -1;
}
#ifdef CONFIG_PPC_64K_PAGES

static bool might_have_hea(void)
{
	/*
	 * The HEA ethernet adapter requires awareness of the
	 * GX bus. Without that awareness we can easily assume
	 * we will never see an HEA ethernet device.
	 */
#ifdef CONFIG_IBMEBUS
	return !cpu_has_feature(CPU_FTR_ARCH_207S);
#else
	return false;
#endif
}

#endif /* #ifdef CONFIG_PPC_64K_PAGES */
static void __init htab_init_page_sizes(void)
{
	int rc;

	/* set the invalid penc to -1 */
	mmu_psize_set_default_penc();

	/* Default to 4K pages only */
	memcpy(mmu_psize_defs, mmu_psize_defaults_old,
	       sizeof(mmu_psize_defaults_old));

	/*
	 * Try to find the available page sizes in the device-tree
	 */
	rc = of_scan_flat_dt(htab_dt_scan_page_sizes, NULL);
	if (rc != 0)  /* Found */
		goto found;

	/*
	 * Not in the device-tree, let's fall back on the known size
	 * list for 16M capable GP & GR
	 */
	if (mmu_has_feature(MMU_FTR_16M_PAGE))
		memcpy(mmu_psize_defs, mmu_psize_defaults_gp,
		       sizeof(mmu_psize_defaults_gp));
found:
	if (!debug_pagealloc_enabled()) {
		/*
		 * Pick a size for the linear mapping. Currently, we only
		 * support 16M, 1M and 4K which is the default
		 */
		if (mmu_psize_defs[MMU_PAGE_16M].shift)
			mmu_linear_psize = MMU_PAGE_16M;
		else if (mmu_psize_defs[MMU_PAGE_1M].shift)
			mmu_linear_psize = MMU_PAGE_1M;
	}

#ifdef CONFIG_PPC_64K_PAGES
	/*
	 * Pick a size for the ordinary pages. Default is 4K, we support
	 * 64K for user mappings and vmalloc if supported by the processor.
	 * We only use 64k for ioremap if the processor
	 * (and firmware) support cache-inhibited large pages.
	 * If not, we use 4k and set mmu_ci_restrictions so that
	 * hash_page knows to switch processes that use cache-inhibited
	 * mappings to 4k pages.
	 */
	if (mmu_psize_defs[MMU_PAGE_64K].shift) {
		mmu_virtual_psize = MMU_PAGE_64K;
		mmu_vmalloc_psize = MMU_PAGE_64K;
		if (mmu_linear_psize == MMU_PAGE_4K)
			mmu_linear_psize = MMU_PAGE_64K;
		if (mmu_has_feature(MMU_FTR_CI_LARGE_PAGE)) {
			/*
			 * When running on pSeries using 64k pages for ioremap
			 * would stop us accessing the HEA ethernet. So if we
			 * have the chance of ever seeing one, stay at 4k.
			 */
			if (!might_have_hea() || !machine_is(pseries))
				mmu_io_psize = MMU_PAGE_64K;
		} else
			mmu_ci_restrictions = 1;
	}
#endif /* CONFIG_PPC_64K_PAGES */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/* We try to use 16M pages for vmemmap if that is supported
	 * and we have at least 1G of RAM at boot
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift &&
	    memblock_phys_mem_size() >= 0x40000000)
		mmu_vmemmap_psize = MMU_PAGE_16M;
	else if (mmu_psize_defs[MMU_PAGE_64K].shift)
		mmu_vmemmap_psize = MMU_PAGE_64K;
	else
		mmu_vmemmap_psize = MMU_PAGE_4K;
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

	printk(KERN_DEBUG "Page orders: linear mapping = %d, "
	       "virtual = %d, io = %d"
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	       ", vmemmap = %d"
#endif
	       "\n",
	       mmu_psize_defs[mmu_linear_psize].shift,
	       mmu_psize_defs[mmu_virtual_psize].shift,
	       mmu_psize_defs[mmu_io_psize].shift
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	       ,mmu_psize_defs[mmu_vmemmap_psize].shift
#endif
	       );

#ifdef CONFIG_HUGETLB_PAGE
	/* Reserve 16G huge page memory sections for huge pages */
	of_scan_flat_dt(htab_dt_scan_hugepage_blocks, NULL);
#endif /* CONFIG_HUGETLB_PAGE */
}
static int __init htab_dt_scan_pftsize(unsigned long node,
				       const char *uname, int depth,
				       void *data)
{
	const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	const __be32 *prop;

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	prop = of_get_flat_dt_prop(node, "ibm,pft-size", NULL);
	if (prop != NULL) {
		/* pft_size[0] is the NUMA CEC cookie */
		ppc64_pft_size = be32_to_cpu(prop[1]);
		return 1;
	}
	return 0;
}
unsigned htab_shift_for_mem_size(unsigned long mem_size)
{
	unsigned memshift = __ilog2(mem_size);
	unsigned pshift = mmu_psize_defs[mmu_virtual_psize].shift;
	unsigned pteg_shift;

	/* round mem_size up to next power of 2 */
	if ((1UL << memshift) < mem_size)
		memshift += 1;

	/* aim for 2 pages / pteg */
	pteg_shift = memshift - (pshift + 1);

	/*
	 * 2^11 PTEGS of 128 bytes each, ie. 2^18 bytes is the minimum htab
	 * size permitted by the architecture.
	 */
	return max(pteg_shift + 7, 18U);
}
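/*
 * Worked example (illustrative, not from the original source): with a 64K
 * base page size (pshift = 16) and 8GB of RAM, memshift = 33, so
 * pteg_shift = 33 - (16 + 1) = 16 and the function returns
 * max(16 + 7, 18) = 23, i.e. an 8MB hash table: 2^16 PTEGs of 128 bytes
 * each, which is two pages per PTEG as intended.
 */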
static unsigned long __init htab_get_table_size(void)
{
	/* If hash size isn't already provided by the platform, we try to
	 * retrieve it from the device-tree. If it's not there either, we
	 * calculate it now based on the total RAM size
	 */
	if (ppc64_pft_size == 0)
		of_scan_flat_dt(htab_dt_scan_pftsize, NULL);
	if (ppc64_pft_size)
		return 1UL << ppc64_pft_size;

	return 1UL << htab_shift_for_mem_size(memblock_phys_mem_size());
}
#ifdef CONFIG_MEMORY_HOTPLUG
int create_section_mapping(unsigned long start, unsigned long end)
{
	int rc = htab_bolt_mapping(start, end, __pa(start),
				   pgprot_val(PAGE_KERNEL), mmu_linear_psize,
				   mmu_kernel_ssize);

	if (rc < 0) {
		int rc2 = htab_remove_mapping(start, end, mmu_linear_psize,
					      mmu_kernel_ssize);
		BUG_ON(rc2 && (rc2 != -ENOENT));
	}
	return rc;
}

int remove_section_mapping(unsigned long start, unsigned long end)
{
	int rc = htab_remove_mapping(start, end, mmu_linear_psize,
				     mmu_kernel_ssize);
	return rc;
}
#endif /* CONFIG_MEMORY_HOTPLUG */
static void __init hash_init_partition_table(phys_addr_t hash_table,
					     unsigned long htab_size)
{
	unsigned long ps_field;
	unsigned long patb_size = 1UL << PATB_SIZE_SHIFT;

	/*
	 * slb llp encoding for the page size used in VPM real mode.
	 * We can ignore that for lpid 0
	 */
	ps_field = 0;
	htab_size = __ilog2(htab_size) - 18;

	BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 24), "Partition table size too large.");
	partition_tb = __va(memblock_alloc_base(patb_size, patb_size,
						MEMBLOCK_ALLOC_ANYWHERE));

	/* Initialize the Partition Table with no entries */
	memset((void *)partition_tb, 0, patb_size);
	partition_tb->patb0 = cpu_to_be64(ps_field | hash_table | htab_size);
	/*
	 * FIXME!! This should be done via update_partition table
	 * For now UPRT is 0 for us.
	 */
	partition_tb->patb1 = 0;
	pr_info("Partition table %p\n", partition_tb);
	/*
	 * update partition table control register
	 */
	mtspr(SPRN_PTCR, __pa(partition_tb) | (PATB_SIZE_SHIFT - 12));
}
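/*
 * Worked example (illustrative, not from the original source): both the
 * partition-table entry above and the _SDR1 value set up in
 * htab_initialize() encode the hash table size as
 * __ilog2(htab_size_bytes) - 18, i.e. the number of size bits beyond the
 * architectural minimum 2^18-byte (256KB) table. A 128MB hash table has
 * __ilog2 = 27, so the encoded size field is 9.
 */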
static void __init htab_initialize(void)
{
	unsigned long table;
	unsigned long pteg_count;
	unsigned long prot;
	unsigned long base = 0, size = 0;
	struct memblock_region *reg;

	DBG(" -> htab_initialize()\n");

	/* Initialize segment sizes */
	htab_init_seg_sizes();

	/* Initialize page sizes */
	htab_init_page_sizes();

	if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) {
		mmu_kernel_ssize = MMU_SEGSIZE_1T;
		mmu_highuser_ssize = MMU_SEGSIZE_1T;
		printk(KERN_INFO "Using 1TB segments\n");
	}

	/*
	 * Calculate the required size of the htab.  We want the number of
	 * PTEGs to equal one half the number of real pages.
	 */
	htab_size_bytes = htab_get_table_size();
	pteg_count = htab_size_bytes >> 7;

	htab_hash_mask = pteg_count - 1;

	if (firmware_has_feature(FW_FEATURE_LPAR) ||
	    firmware_has_feature(FW_FEATURE_PS3_LV1)) {
		/* Using a hypervisor which owns the htab */
#ifdef CONFIG_FA_DUMP
		/*
		 * If firmware assisted dump is active firmware preserves
		 * the contents of htab along with entire partition memory.
		 * Clear the htab if firmware assisted dump is active so
		 * that we don't end up using old mappings.
		 */
		if (is_fadump_active() && ppc_md.hpte_clear_all)
			ppc_md.hpte_clear_all();
#endif
	} else {
		unsigned long limit = MEMBLOCK_ALLOC_ANYWHERE;

#ifdef CONFIG_PPC_CELL
		/*
		 * Cell may require the hash table down low when using the
		 * Axon IOMMU in order to fit the dynamic region over it, see
		 * comments in cell/iommu.c
		 */
		if (fdt_subnode_offset(initial_boot_params, 0, "axon") > 0) {
			limit = 0x80000000;
			pr_info("Hash table forced below 2G for Axon IOMMU\n");
		}
#endif /* CONFIG_PPC_CELL */

		table = memblock_alloc_base(htab_size_bytes, htab_size_bytes,
					    limit);

		DBG("Hash table allocated at %lx, size: %lx\n", table,
		    htab_size_bytes);

		htab_address = __va(table);

		/* htab absolute addr + encoded htabsize */
		_SDR1 = table + __ilog2(htab_size_bytes) - 18;

		/* Initialize the HPT with no entries */
		memset((void *)table, 0, htab_size_bytes);

		if (!cpu_has_feature(CPU_FTR_ARCH_300))
			mtspr(SPRN_SDR1, _SDR1);
		else
			hash_init_partition_table(table, htab_size_bytes);
	}

	prot = pgprot_val(PAGE_KERNEL);

#ifdef CONFIG_DEBUG_PAGEALLOC
	if (debug_pagealloc_enabled()) {
		linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT;
		linear_map_hash_slots = __va(memblock_alloc_base(
				linear_map_hash_count, 1, ppc64_rma_size));
		memset(linear_map_hash_slots, 0, linear_map_hash_count);
	}
#endif /* CONFIG_DEBUG_PAGEALLOC */

	/* On U3 based machines, we need to reserve the DART area and
	 * _NOT_ map it to avoid cache paradoxes as it's remapped non
	 * cacheable later on
	 */

	/* Create the bolted linear mapping in the hash table */
	for_each_memblock(memory, reg) {
		base = (unsigned long)__va(reg->base);
		size = reg->size;

		DBG("creating mapping for region: %lx..%lx (prot: %lx)\n",
		    base, size, prot);

		BUG_ON(htab_bolt_mapping(base, base + size, __pa(base),
					 prot, mmu_linear_psize, mmu_kernel_ssize));
	}
	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);

	/*
	 * If we have a memory_limit and we've allocated TCEs then we need to
	 * explicitly map the TCE area at the top of RAM. We also cope with the
	 * case that the TCEs start below memory_limit.
	 * tce_alloc_start/end are 16MB aligned so the mapping should work
	 * for either 4K or 16MB pages.
	 */
	if (tce_alloc_start) {
		tce_alloc_start = (unsigned long)__va(tce_alloc_start);
		tce_alloc_end = (unsigned long)__va(tce_alloc_end);

		if (base + size >= tce_alloc_start)
			tce_alloc_start = base + size + 1;

		BUG_ON(htab_bolt_mapping(tce_alloc_start, tce_alloc_end,
					 __pa(tce_alloc_start), prot,
					 mmu_linear_psize, mmu_kernel_ssize));
	}

	DBG(" <- htab_initialize()\n");
}
void __init __weak hpte_init_lpar(void)
{
	panic("FW_FEATURE_LPAR set but no LPAR support compiled\n");
}
void __init hash__early_init_mmu(void)
{
	/*
	 * initialize page table size
	 */
	__pte_frag_nr = H_PTE_FRAG_NR;
	__pte_frag_size_shift = H_PTE_FRAG_SIZE_SHIFT;

	__pte_index_size = H_PTE_INDEX_SIZE;
	__pmd_index_size = H_PMD_INDEX_SIZE;
	__pud_index_size = H_PUD_INDEX_SIZE;
	__pgd_index_size = H_PGD_INDEX_SIZE;
	__pmd_cache_index = H_PMD_CACHE_INDEX;
	__pte_table_size = H_PTE_TABLE_SIZE;
	__pmd_table_size = H_PMD_TABLE_SIZE;
	__pud_table_size = H_PUD_TABLE_SIZE;
	__pgd_table_size = H_PGD_TABLE_SIZE;
	/*
	 * 4k use hugepd format, so for hash set them to
	 * zero
	 */
	__pmd_val_bits = 0;
	__pud_val_bits = 0;
	__pgd_val_bits = 0;

	__kernel_virt_start = H_KERN_VIRT_START;
	__kernel_virt_size = H_KERN_VIRT_SIZE;
	__vmalloc_start = H_VMALLOC_START;
	__vmalloc_end = H_VMALLOC_END;
	vmemmap = (struct page *)H_VMEMMAP_BASE;
	ioremap_bot = IOREMAP_BASE;

#ifdef CONFIG_PCI
	pci_io_base = ISA_IO_BASE;
#endif

	/* Select appropriate backend */
	if (firmware_has_feature(FW_FEATURE_PS3_LV1))
		ps3_early_mm_init();
	else if (firmware_has_feature(FW_FEATURE_LPAR))
		hpte_init_lpar();
	else
		hpte_init_native();

	/* Initialize the MMU Hash table and create the linear mapping
	 * of memory. Has to be done before SLB initialization as this is
	 * currently where the page size encoding is obtained.
	 */
	htab_initialize();

	pr_info("Initializing hash mmu with SLB\n");
	/* Initialize SLB management */
	slb_initialize();
}
#ifdef CONFIG_SMP
void hash__early_init_mmu_secondary(void)
{
	/* Initialize hash table for that CPU */
	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		if (!cpu_has_feature(CPU_FTR_ARCH_300))
			mtspr(SPRN_SDR1, _SDR1);
		else
			mtspr(SPRN_PTCR,
			      __pa(partition_tb) | (PATB_SIZE_SHIFT - 12));
	}
	/* Initialize SLB */
	slb_initialize();
}
#endif /* CONFIG_SMP */
/*
 * Called by asm hashtable.S for doing lazy icache flush
 */
unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
{
	struct page *page;

	if (!pfn_valid(pte_pfn(pte)))
		return pp;

	page = pte_page(pte);

	/* page is dirty */
	if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
		if (trap == 0x400) {
			flush_dcache_icache_page(page);
			set_bit(PG_arch_1, &page->flags);
		} else
			pp |= HPTE_R_N;
	}
	return pp;
}
#ifdef CONFIG_PPC_MM_SLICES
static unsigned int get_paca_psize(unsigned long addr)
{
	u64 lpsizes;
	unsigned char *hpsizes;
	unsigned long index, mask_index;

	if (addr < SLICE_LOW_TOP) {
		lpsizes = get_paca()->mm_ctx_low_slices_psize;
		index = GET_LOW_SLICE_INDEX(addr);
		return (lpsizes >> (index * 4)) & 0xF;
	}
	hpsizes = get_paca()->mm_ctx_high_slices_psize;
	index = GET_HIGH_SLICE_INDEX(addr);
	mask_index = index & 0x1;
	return (hpsizes[index >> 1] >> (mask_index * 4)) & 0xF;
}

#else
unsigned int get_paca_psize(unsigned long addr)
{
	return get_paca()->mm_ctx_user_psize;
}
#endif
/*
 * Demote a segment to using 4k pages.
 * For now this makes the whole process use 4k pages.
 */
#ifdef CONFIG_PPC_64K_PAGES
void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
{
	if (get_slice_psize(mm, addr) == MMU_PAGE_4K)
		return;
	slice_set_range_psize(mm, addr, 1, MMU_PAGE_4K);
	copro_flush_all_slbs(mm);
	if ((get_paca_psize(addr) != MMU_PAGE_4K) && (current->mm == mm)) {
		copy_mm_to_paca(&mm->context);
		slb_flush_and_rebolt();
	}
}
#endif /* CONFIG_PPC_64K_PAGES */
#ifdef CONFIG_PPC_SUBPAGE_PROT
/*
 * This looks up a 2-bit protection code for a 4k subpage of a 64k page.
 * Userspace sets the subpage permissions using the subpage_prot system call.
 *
 * Result is 0: full permissions, _PAGE_RW: read-only,
 * _PAGE_RWX: no access.
 */
static int subpage_protection(struct mm_struct *mm, unsigned long ea)
{
	struct subpage_prot_table *spt = &mm->context.spt;
	u32 spp = 0;
	u32 **sbpm, *sbpp;

	if (ea >= spt->maxaddr)
		return 0;
	if (ea < 0x100000000UL) {
		/* addresses below 4GB use spt->low_prot */
		sbpm = spt->low_prot;
	} else {
		sbpm = spt->protptrs[ea >> SBP_L3_SHIFT];
		if (!sbpm)
			return 0;
	}
	sbpp = sbpm[(ea >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1)];
	if (!sbpp)
		return 0;
	spp = sbpp[(ea >> PAGE_SHIFT) & (SBP_L1_COUNT - 1)];

	/* extract 2-bit bitfield for this 4k subpage */
	spp >>= 30 - 2 * ((ea >> 12) & 0xf);

	/*
	 * 0 -> full permission
	 * 1 -> Read only
	 * 2 -> no access.
	 * We return the flags that need to be cleared.
	 */
	spp = ((spp & 2) ? _PAGE_RWX : 0) | ((spp & 1) ? _PAGE_WRITE : 0);
	return spp;
}

#else /* CONFIG_PPC_SUBPAGE_PROT */
static inline int subpage_protection(struct mm_struct *mm, unsigned long ea)
{
	return 0;
}
#endif
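/*
 * Illustrative example (not from the original source): each 32-bit word of
 * the protection map holds sixteen 2-bit fields, one per 4K subpage of a
 * 64K page, most significant subpage first. For subpage index
 * i = (ea >> 12) & 0xf the shift above is 30 - 2*i, so subpage 0 is taken
 * from bits 31:30 and subpage 15 from bits 1:0. After the shift, bit 1 set
 * means "no access" (_PAGE_RWX is cleared) and bit 0 set means read-only
 * (_PAGE_WRITE is cleared).
 */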
void hash_failure_debug(unsigned long ea, unsigned long access,
			unsigned long vsid, unsigned long trap,
			int ssize, int psize, int lpsize, unsigned long pte)
{
	if (!printk_ratelimit())
		return;
	pr_info("mm: Hashing failure ! EA=0x%lx access=0x%lx current=%s\n",
		ea, access, current->comm);
	pr_info("    trap=0x%lx vsid=0x%lx ssize=%d base psize=%d psize %d pte=0x%lx\n",
		trap, vsid, ssize, psize, lpsize, pte);
}
static void check_paca_psize(unsigned long ea, struct mm_struct *mm,
			     int psize, bool user_region)
{
	if (user_region) {
		if (psize != get_paca_psize(ea)) {
			copy_mm_to_paca(&mm->context);
			slb_flush_and_rebolt();
		}
	} else if (get_paca()->vmalloc_sllp !=
		   mmu_psize_defs[mmu_vmalloc_psize].sllp) {
		get_paca()->vmalloc_sllp =
			mmu_psize_defs[mmu_vmalloc_psize].sllp;
		slb_vmalloc_update();
	}
}
/* Result code is:
 *  0 - handled
 *  1 - normal page fault
 * -1 - critical hash insertion error
 * -2 - access not permitted by subpage protection mechanism
 */
int hash_page_mm(struct mm_struct *mm, unsigned long ea,
		 unsigned long access, unsigned long trap,
		 unsigned long flags)
{
	bool is_thp;
	enum ctx_state prev_state = exception_enter();
	pgd_t *pgdir;
	unsigned long vsid;
	pte_t *ptep;
	unsigned hugeshift;
	const struct cpumask *tmp;
	int rc, user_region = 0;
	int psize, ssize;

	DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n",
		ea, access, trap);
	trace_hash_fault(ea, access, trap);

	/* Get region & vsid */
	switch (REGION_ID(ea)) {
	case USER_REGION_ID:
		user_region = 1;
		if (!mm) {
			DBG_LOW(" user region with no mm !\n");
			rc = 1;
			goto bail;
		}
		psize = get_slice_psize(mm, ea);
		ssize = user_segment_size(ea);
		vsid = get_vsid(mm->context.id, ea, ssize);
		break;
	case VMALLOC_REGION_ID:
		vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
		if (ea < VMALLOC_END)
			psize = mmu_vmalloc_psize;
		else
			psize = mmu_io_psize;
		ssize = mmu_kernel_ssize;
		break;
	default:
		/* Not a valid range
		 * Send the problem up to do_page_fault
		 */
		rc = 1;
		goto bail;
	}
	DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid);

	/* Bad address. */
	if (!vsid) {
		DBG_LOW("Bad address!\n");
		rc = 1;
		goto bail;
	}
	/* Get pgdir */
	pgdir = mm->pgd;
	if (pgdir == NULL) {
		rc = 1;
		goto bail;
	}

	/* Check CPU locality */
	tmp = cpumask_of(smp_processor_id());
	if (user_region && cpumask_equal(mm_cpumask(mm), tmp))
		flags |= HPTE_LOCAL_UPDATE;

#ifndef CONFIG_PPC_64K_PAGES
	/* If we use 4K pages and our psize is not 4K, then we might
	 * be hitting a special driver mapping, and need to align the
	 * address before we fetch the PTE.
	 *
	 * It could also be a hugepage mapping, in which case this is
	 * not necessary, but it's not harmful, either.
	 */
	if (psize != MMU_PAGE_4K)
		ea &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
#endif /* CONFIG_PPC_64K_PAGES */

	/* Get PTE and page size from page tables */
	ptep = __find_linux_pte_or_hugepte(pgdir, ea, &is_thp, &hugeshift);
	if (ptep == NULL || !pte_present(*ptep)) {
		DBG_LOW(" no PTE !\n");
		rc = 1;
		goto bail;
	}

	/* Add _PAGE_PRESENT to the required access perm */
	access |= _PAGE_PRESENT;

	/* Pre-check access permissions (will be re-checked atomically
	 * in __hash_page_XX but this pre-check is a fast path
	 */
	if (!check_pte_access(access, pte_val(*ptep))) {
		DBG_LOW(" no access !\n");
		rc = 1;
		goto bail;
	}

	if (hugeshift) {
		if (is_thp)
			rc = __hash_page_thp(ea, access, vsid, (pmd_t *)ptep,
					     trap, flags, ssize, psize);
#ifdef CONFIG_HUGETLB_PAGE
		else
			rc = __hash_page_huge(ea, access, vsid, ptep, trap,
					      flags, ssize, hugeshift, psize);
#else
		else {
			/*
			 * if we have hugeshift, and is not transhuge with
			 * hugetlb disabled, something is really wrong.
			 */
			rc = 1;
			WARN_ON(1);
		}
#endif
		if (current->mm == mm)
			check_paca_psize(ea, mm, psize, user_region);

		goto bail;
	}

#ifndef CONFIG_PPC_64K_PAGES
	DBG_LOW(" i-pte: %016lx\n", pte_val(*ptep));
#else
	DBG_LOW(" i-pte: %016lx %016lx\n", pte_val(*ptep),
		pte_val(*(ptep + PTRS_PER_PTE)));
#endif
	/* Do actual hashing */
#ifdef CONFIG_PPC_64K_PAGES
	/* If H_PAGE_4K_PFN is set, make sure this is a 4k segment */
	if ((pte_val(*ptep) & H_PAGE_4K_PFN) && psize == MMU_PAGE_64K) {
		demote_segment_4k(mm, ea);
		psize = MMU_PAGE_4K;
	}

	/* If this PTE is non-cacheable and we have restrictions on
	 * using non cacheable large pages, then we switch to 4k
	 */
	if (mmu_ci_restrictions && psize == MMU_PAGE_64K && pte_ci(*ptep)) {
		if (user_region) {
			demote_segment_4k(mm, ea);
			psize = MMU_PAGE_4K;
		} else if (ea < VMALLOC_END) {
			/*
			 * some driver did a non-cacheable mapping
			 * in vmalloc space, so switch vmalloc
			 * to 4k pages
			 */
			printk(KERN_ALERT "Reducing vmalloc segment "
			       "to 4kB pages because of "
			       "non-cacheable mapping\n");
			psize = mmu_vmalloc_psize = MMU_PAGE_4K;
			copro_flush_all_slbs(mm);
		}
	}
#endif /* CONFIG_PPC_64K_PAGES */

	if (current->mm == mm)
		check_paca_psize(ea, mm, psize, user_region);

#ifdef CONFIG_PPC_64K_PAGES
	if (psize == MMU_PAGE_64K)
		rc = __hash_page_64K(ea, access, vsid, ptep, trap,
				     flags, ssize);
	else
#endif /* CONFIG_PPC_64K_PAGES */
	{
		int spp = subpage_protection(mm, ea);
		if (access & spp)
			rc = -2;
		else
			rc = __hash_page_4K(ea, access, vsid, ptep, trap,
					    flags, ssize, spp);
	}

	/* Dump some info in case of hash insertion failure, they should
	 * never happen so it is really useful to know if/when they do
	 */
	if (rc == -1)
		hash_failure_debug(ea, access, vsid, trap, ssize, psize,
				   psize, pte_val(*ptep));
#ifndef CONFIG_PPC_64K_PAGES
	DBG_LOW(" o-pte: %016lx\n", pte_val(*ptep));
#else
	DBG_LOW(" o-pte: %016lx %016lx\n", pte_val(*ptep),
		pte_val(*(ptep + PTRS_PER_PTE)));
#endif
	DBG_LOW(" -> rc=%d\n", rc);

bail:
	exception_exit(prev_state);
	return rc;
}
EXPORT_SYMBOL_GPL(hash_page_mm);
int hash_page(unsigned long ea, unsigned long access, unsigned long trap,
	      unsigned long dsisr)
{
	unsigned long flags = 0;
	struct mm_struct *mm = current->mm;

	if (REGION_ID(ea) == VMALLOC_REGION_ID)
		mm = &init_mm;

	if (dsisr & DSISR_NOHPTE)
		flags |= HPTE_NOHPTE_UPDATE;

	return hash_page_mm(mm, ea, access, trap, flags);
}
EXPORT_SYMBOL_GPL(hash_page);
int __hash_page(unsigned long ea, unsigned long msr, unsigned long trap,
		unsigned long dsisr)
{
	unsigned long access = _PAGE_PRESENT | _PAGE_READ;
	unsigned long flags = 0;
	struct mm_struct *mm = current->mm;

	if (REGION_ID(ea) == VMALLOC_REGION_ID)
		mm = &init_mm;

	if (dsisr & DSISR_NOHPTE)
		flags |= HPTE_NOHPTE_UPDATE;

	if (dsisr & DSISR_ISSTORE)
		access |= _PAGE_WRITE;
	/*
	 * We set _PAGE_PRIVILEGED only when
	 * kernel mode accesses kernel space.
	 *
	 * _PAGE_PRIVILEGED is NOT set
	 * 1) when kernel mode accesses user space
	 * 2) when user space accesses kernel space.
	 */
	access |= _PAGE_PRIVILEGED;
	if ((msr & MSR_PR) || (REGION_ID(ea) == USER_REGION_ID))
		access &= ~_PAGE_PRIVILEGED;

	if (trap == 0x400)
		access |= _PAGE_EXEC;

	return hash_page_mm(mm, ea, access, trap, flags);
}
#ifdef CONFIG_PPC_MM_SLICES
static bool should_hash_preload(struct mm_struct *mm, unsigned long ea)
{
	int psize = get_slice_psize(mm, ea);

	/* We only prefault standard pages for now */
	if (unlikely(psize != mm->context.user_psize))
		return false;

	/*
	 * Don't prefault if subpage protection is enabled for the EA.
	 */
	if (unlikely((psize == MMU_PAGE_4K) && subpage_protection(mm, ea)))
		return false;

	return true;
}
#else
static bool should_hash_preload(struct mm_struct *mm, unsigned long ea)
{
	return true;
}
#endif
void hash_preload(struct mm_struct *mm, unsigned long ea,
		  unsigned long access, unsigned long trap)
{
	int hugepage_shift;
	unsigned long vsid;
	pgd_t *pgdir;
	pte_t *ptep;
	unsigned long flags;
	int rc, ssize, update_flags = 0;

	BUG_ON(REGION_ID(ea) != USER_REGION_ID);

	if (!should_hash_preload(mm, ea))
		return;

	DBG_LOW("hash_preload(mm=%p, mm->pgdir=%p, ea=%016lx, access=%lx,"
		" trap=%lx\n", mm, mm->pgd, ea, access, trap);

	/* Get Linux PTE if available */
	pgdir = mm->pgd;
	if (pgdir == NULL)
		return;

	/* Get VSID */
	ssize = user_segment_size(ea);
	vsid = get_vsid(mm->context.id, ea, ssize);
	if (!vsid)
		return;
	/*
	 * Hash doesn't like irqs. Walking linux page table with irq disabled
	 * saves us from holding multiple locks.
	 */
	local_irq_save(flags);

	/*
	 * THP pages use update_mmu_cache_pmd. We don't do
	 * hash preload there. Hence can ignore THP here
	 */
	ptep = find_linux_pte_or_hugepte(pgdir, ea, NULL, &hugepage_shift);
	if (!ptep)
		goto out_exit;

	WARN_ON(hugepage_shift);
#ifdef CONFIG_PPC_64K_PAGES
	/* If either H_PAGE_4K_PFN or cache inhibited is set (and we are on
	 * a 64K kernel), then we don't preload, hash_page() will take
	 * care of it once we actually try to access the page.
	 * That way we don't have to duplicate all of the logic for segment
	 * page size demotion here
	 */
	if ((pte_val(*ptep) & H_PAGE_4K_PFN) || pte_ci(*ptep))
		goto out_exit;
#endif /* CONFIG_PPC_64K_PAGES */

	/* Is that local to this CPU ? */
	if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		update_flags |= HPTE_LOCAL_UPDATE;

	/* Hash it in */
#ifdef CONFIG_PPC_64K_PAGES
	if (mm->context.user_psize == MMU_PAGE_64K)
		rc = __hash_page_64K(ea, access, vsid, ptep, trap,
				     update_flags, ssize);
	else
#endif /* CONFIG_PPC_64K_PAGES */
		rc = __hash_page_4K(ea, access, vsid, ptep, trap, update_flags,
				    ssize, subpage_protection(mm, ea));

	/* Dump some info in case of hash insertion failure, they should
	 * never happen so it is really useful to know if/when they do
	 */
	if (rc == -1)
		hash_failure_debug(ea, access, vsid, trap, ssize,
				   mm->context.user_psize,
				   mm->context.user_psize,
				   pte_val(*ptep));
out_exit:
	local_irq_restore(flags);
}
/* WARNING: This is called from hash_low_64.S, if you change this prototype,
 *          do not forget to update the assembly call site !
 */
void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize, int ssize,
		     unsigned long flags)
{
	unsigned long hash, index, shift, hidx, slot;
	int local = flags & HPTE_LOCAL_UPDATE;

	DBG_LOW("flush_hash_page(vpn=%016lx)\n", vpn);
	pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
		hash = hpt_hash(vpn, shift, ssize);
		hidx = __rpte_to_hidx(pte, index);
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;
		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;
		DBG_LOW(" sub %ld: hash=%lx, hidx=%lx\n", index, slot, hidx);
		/*
		 * We use same base page size and actual psize, because we don't
		 * use these functions for hugepage
		 */
		ppc_md.hpte_invalidate(slot, vpn, psize, psize, ssize, local);
	} pte_iterate_hashed_end();

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* Transactions are not aborted by tlbiel, only tlbie.
	 * Without, syncing a page back to a block device w/ PIO could pick up
	 * transactional data (bad!) so we force an abort here. Before the
	 * sync the page will be made read-only, which will flush_hash_page.
	 * BIG ISSUE here: if the kernel uses a page from userspace without
	 * unmapping it first, it may see the speculated version.
	 */
	if (local && cpu_has_feature(CPU_FTR_TM) &&
	    current->thread.regs &&
	    MSR_TM_ACTIVE(current->thread.regs->msr)) {
		tm_abort(TM_CAUSE_TLBI);
	}
#endif
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void flush_hash_hugepage(unsigned long vsid, unsigned long addr,
			 pmd_t *pmdp, unsigned int psize, int ssize,
			 unsigned long flags)
{
	int i, max_hpte_count, valid;
	unsigned long s_addr;
	unsigned char *hpte_slot_array;
	unsigned long hidx, shift, vpn, hash, slot;
	int local = flags & HPTE_LOCAL_UPDATE;

	s_addr = addr & HPAGE_PMD_MASK;
	hpte_slot_array = get_hpte_slot_array(pmdp);
	/*
	 * IF we try to do a HUGE PTE update after a withdraw is done.
	 * we will find the below NULL. This happens when we do
	 * split_huge_page_pmd
	 */
	if (!hpte_slot_array)
		return;

	if (ppc_md.hugepage_invalidate) {
		ppc_md.hugepage_invalidate(vsid, s_addr, hpte_slot_array,
					   psize, ssize, local);
		goto tm_abort;
	}
	/*
	 * No bulk hpte removal support, invalidate each entry
	 */
	shift = mmu_psize_defs[psize].shift;
	max_hpte_count = HPAGE_PMD_SIZE >> shift;
	for (i = 0; i < max_hpte_count; i++) {
		/*
		 * 8 bits per each hpte entry
		 * 000| [ secondary group (one bit) | hidx (3 bits) | valid bit]
		 */
		valid = hpte_valid(hpte_slot_array, i);
		if (!valid)
			continue;
		hidx = hpte_hash_index(hpte_slot_array, i);

		/* get the vpn */
		addr = s_addr + (i * (1ul << shift));
		vpn = hpt_vpn(addr, vsid, ssize);
		hash = hpt_hash(vpn, shift, ssize);
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;

		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;
		ppc_md.hpte_invalidate(slot, vpn, psize,
				       MMU_PAGE_16M, ssize, local);
	}
tm_abort:
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* Transactions are not aborted by tlbiel, only tlbie.
	 * Without, syncing a page back to a block device w/ PIO could pick up
	 * transactional data (bad!) so we force an abort here. Before the
	 * sync the page will be made read-only, which will flush_hash_page.
	 * BIG ISSUE here: if the kernel uses a page from userspace without
	 * unmapping it first, it may see the speculated version.
	 */
	if (local && cpu_has_feature(CPU_FTR_TM) &&
	    current->thread.regs &&
	    MSR_TM_ACTIVE(current->thread.regs->msr)) {
		tm_abort(TM_CAUSE_TLBI);
	}
#endif
	return;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
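/*
 * Illustrative example (not from the original source): given the layout
 * described in the loop above, a slot-array byte of 0x0b decodes as
 * valid = 1 (bit 0) and hidx = 0x0b >> 1 = 5; since 5 & _PTEIDX_SECONDARY
 * (0x8) is clear the HPTE sits in the primary group, at offset
 * hidx & _PTEIDX_GROUP_IX = 5 within that group. This assumes hpte_valid()
 * and hpte_hash_index() return the low bit and the remaining bits
 * respectively, as the layout comment describes.
 */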
void flush_hash_range(unsigned long number, int local)
{
	if (ppc_md.flush_hash_range)
		ppc_md.flush_hash_range(number, local);
	else {
		int i;
		struct ppc64_tlb_batch *batch =
			this_cpu_ptr(&ppc64_tlb_batch);

		for (i = 0; i < number; i++)
			flush_hash_page(batch->vpn[i], batch->pte[i],
					batch->psize, batch->ssize, local);
	}
}
/*
 * low_hash_fault is called when the low level hash code failed
 * to insert a PTE due to a hypervisor error
 */
void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc)
{
	enum ctx_state prev_state = exception_enter();

	if (user_mode(regs)) {
#ifdef CONFIG_PPC_SUBPAGE_PROT
		if (rc == -2)
			_exception(SIGSEGV, regs, SEGV_ACCERR, address);
		else
#endif
			_exception(SIGBUS, regs, BUS_ADRERR, address);
	} else
		bad_page_fault(regs, address, SIGBUS);

	exception_exit(prev_state);
}
long hpte_insert_repeating(unsigned long hash, unsigned long vpn,
			   unsigned long pa, unsigned long rflags,
			   unsigned long vflags, int psize, int ssize)
{
	unsigned long hpte_group;
	long slot;

repeat:
	hpte_group = ((hash & htab_hash_mask) *
		       HPTES_PER_GROUP) & ~0x7UL;

	/* Insert into the hash table, primary slot */
	slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, vflags,
				  psize, psize, ssize);

	/* Primary is full, try the secondary */
	if (unlikely(slot == -1)) {
		hpte_group = ((~hash & htab_hash_mask) *
			      HPTES_PER_GROUP) & ~0x7UL;
		slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags,
					  vflags | HPTE_V_SECONDARY,
					  psize, psize, ssize);
		if (slot == -1) {
			if (mftb() & 0x1)
				hpte_group = ((hash & htab_hash_mask) *
					      HPTES_PER_GROUP) & ~0x7UL;

			ppc_md.hpte_remove(hpte_group);
			goto repeat;
		}
	}

	return slot;
}
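/*
 * Worked example (illustrative, not part of the original source): with
 * htab_hash_mask = 0xffff and hash = 0x1234, the primary group above starts
 * at slot (0x1234 & 0xffff) * HPTES_PER_GROUP = 0x91a0 and the secondary
 * group at (~0x1234 & 0xffff) * HPTES_PER_GROUP = 0xedcb * 8 = 0x76e58,
 * assuming HPTES_PER_GROUP == 8, in which case the & ~0x7UL only re-asserts
 * the natural 8-slot group alignment.
 */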
#ifdef CONFIG_DEBUG_PAGEALLOC
static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
{
	unsigned long hash;
	unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
	unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize);
	unsigned long mode = htab_convert_pte_flags(pgprot_val(PAGE_KERNEL));
	long ret;

	hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);

	/* Don't create HPTE entries for bad address */
	if (!vsid)
		return;

	ret = hpte_insert_repeating(hash, vpn, __pa(vaddr), mode,
				    HPTE_V_BOLTED,
				    mmu_linear_psize, mmu_kernel_ssize);

	BUG_ON(ret < 0);
	spin_lock(&linear_map_hash_lock);
	BUG_ON(linear_map_hash_slots[lmi] & 0x80);
	linear_map_hash_slots[lmi] = ret | 0x80;
	spin_unlock(&linear_map_hash_lock);
}

static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
{
	unsigned long hash, hidx, slot;
	unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
	unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize);

	hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);
	spin_lock(&linear_map_hash_lock);
	BUG_ON(!(linear_map_hash_slots[lmi] & 0x80));
	hidx = linear_map_hash_slots[lmi] & 0x7f;
	linear_map_hash_slots[lmi] = 0;
	spin_unlock(&linear_map_hash_lock);
	if (hidx & _PTEIDX_SECONDARY)
		hash = ~hash;
	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	slot += hidx & _PTEIDX_GROUP_IX;
	ppc_md.hpte_invalidate(slot, vpn, mmu_linear_psize, mmu_linear_psize,
			       mmu_kernel_ssize, 0);
}

void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	unsigned long flags, vaddr, lmi;
	int i;

	local_irq_save(flags);
	for (i = 0; i < numpages; i++, page++) {
		vaddr = (unsigned long)page_address(page);
		lmi = __pa(vaddr) >> PAGE_SHIFT;
		if (lmi >= linear_map_hash_count)
			continue;
		if (enable)
			kernel_map_linear_page(vaddr, lmi);
		else
			kernel_unmap_linear_page(vaddr, lmi);
	}
	local_irq_restore(flags);
}
#endif /* CONFIG_DEBUG_PAGEALLOC */
void hash__setup_initial_memory_limit(phys_addr_t first_memblock_base,
				      phys_addr_t first_memblock_size)
{
	/* We don't currently support the first MEMBLOCK not mapping 0
	 * physical on those processors
	 */
	BUG_ON(first_memblock_base != 0);

	/* On LPAR systems, the first entry is our RMA region,
	 * non-LPAR 64-bit hash MMU systems don't have a limitation
	 * on real mode access, but using the first entry works well
	 * enough. We also clamp it to 1G to avoid some funky things
	 * such as RTAS bugs etc...
	 */
	ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);

	/* Finally limit subsequent allocations */
	memblock_set_current_limit(ppc64_rma_size);
}
);