/*
 * Page table handling routines for radix page table.
 *
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/sched/mm.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/powernv.h>

#include <trace/events/thp.h>

static int native_register_process_table(unsigned long base, unsigned long pg_sz,
					  unsigned long table_size)
{
	unsigned long patb1 = base | table_size | PATB_GR;

	partition_tb->patb1 = cpu_to_be64(patb1);
	return 0;
}

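/*
 * Early page table allocator: returns a zeroed, naturally aligned block
 * taken directly from memblock, for use before the slab allocator is up.
 */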
static __ref void *early_alloc_pgtable(unsigned long size)
{
	void *pt;

	pt = __va(memblock_alloc_base(size, size, MEMBLOCK_ALLOC_ANYWHERE));
	memset(pt, 0, size);

	return pt;
}

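/*
 * Map a single kernel page: install a translation for effective address
 * 'ea' to physical address 'pa' at the requested page size. Uses the
 * normal pud/pmd/pte allocators once the slab allocator is available,
 * and falls back to early memblock-backed tables during boot.
 */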
int radix__map_kernel_page(unsigned long ea, unsigned long pa,
			  pgprot_t flags,
			  unsigned int map_page_size)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	/*
	 * Make sure task size is correct as per the max addr
	 */
	BUILD_BUG_ON(TASK_SIZE_USER64 > RADIX_PGTABLE_RANGE);
	if (slab_is_available()) {
		pgdp = pgd_offset_k(ea);
		pudp = pud_alloc(&init_mm, pgdp, ea);
		if (!pudp)
			return -ENOMEM;
		if (map_page_size == PUD_SIZE) {
			ptep = (pte_t *)pudp;
			goto set_the_pte;
		}
		pmdp = pmd_alloc(&init_mm, pudp, ea);
		if (!pmdp)
			return -ENOMEM;
		if (map_page_size == PMD_SIZE) {
			ptep = pmdp_ptep(pmdp);
			goto set_the_pte;
		}
		ptep = pte_alloc_kernel(pmdp, ea);
		if (!ptep)
			return -ENOMEM;
	} else {
		pgdp = pgd_offset_k(ea);
		if (pgd_none(*pgdp)) {
			pudp = early_alloc_pgtable(PUD_TABLE_SIZE);
			BUG_ON(pudp == NULL);
			pgd_populate(&init_mm, pgdp, pudp);
		}
		pudp = pud_offset(pgdp, ea);
		if (map_page_size == PUD_SIZE) {
			ptep = (pte_t *)pudp;
			goto set_the_pte;
		}
		if (pud_none(*pudp)) {
			pmdp = early_alloc_pgtable(PMD_TABLE_SIZE);
			BUG_ON(pmdp == NULL);
			pud_populate(&init_mm, pudp, pmdp);
		}
		pmdp = pmd_offset(pudp, ea);
		if (map_page_size == PMD_SIZE) {
			ptep = pmdp_ptep(pmdp);
			goto set_the_pte;
		}
		if (!pmd_present(*pmdp)) {
			ptep = early_alloc_pgtable(PAGE_SIZE);
			BUG_ON(ptep == NULL);
			pmd_populate_kernel(&init_mm, pmdp, ptep);
		}
		ptep = pte_offset_kernel(pmdp, ea);
	}

set_the_pte:
	set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, flags));
	smp_wmb();
	return 0;
}

static inline void __meminit print_mapping(unsigned long start,
					   unsigned long end,
					   unsigned long size)
{
	if (end <= start)
		return;

	pr_info("Mapped range 0x%lx - 0x%lx with 0x%lx\n", start, end, size);
}

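/*
 * Map a physical range into the kernel linear mapping, using the largest
 * page size (1G, 2M or the base page size) that the current address
 * alignment, the remaining gap and the supported MMU page sizes allow.
 */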
static int __meminit create_physical_mapping(unsigned long start,
					     unsigned long end)
{
	unsigned long addr, mapping_size = 0;

	start = _ALIGN_UP(start, PAGE_SIZE);
	for (addr = start; addr < end; addr += mapping_size) {
		unsigned long gap, previous_size;
		int rc;

		gap = end - addr;
		previous_size = mapping_size;

		if (IS_ALIGNED(addr, PUD_SIZE) && gap >= PUD_SIZE &&
		    mmu_psize_defs[MMU_PAGE_1G].shift)
			mapping_size = PUD_SIZE;
		else if (IS_ALIGNED(addr, PMD_SIZE) && gap >= PMD_SIZE &&
			 mmu_psize_defs[MMU_PAGE_2M].shift)
			mapping_size = PMD_SIZE;
		else
			mapping_size = PAGE_SIZE;

		if (mapping_size != previous_size) {
			print_mapping(start, addr, previous_size);
			start = addr;
		}

		rc = radix__map_kernel_page((unsigned long)__va(addr), addr,
					    PAGE_KERNEL_X, mapping_size);
		if (rc)
			return rc;
	}

	print_mapping(start, addr, mapping_size);
	return 0;
}

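/*
 * Build the initial radix page tables for the host: create the linear
 * mapping for every memblock region, then allocate and fill the process
 * table and hook it into the partition table.
 */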
static void __init radix_init_pgtable(void)
{
	unsigned long rts_field;
	struct memblock_region *reg;

	/* We don't support slb for radix */
	mmu_slb_size = 0;
	/*
	 * Create the linear mapping, using standard page size for now
	 */
	for_each_memblock(memory, reg)
		WARN_ON(create_physical_mapping(reg->base,
						reg->base + reg->size));
	/*
	 * Allocate Partition table and process table for the
	 * host.
	 */
	BUILD_BUG_ON_MSG((PRTB_SIZE_SHIFT > 36), "Process table size too large.");
	process_tb = early_alloc_pgtable(1UL << PRTB_SIZE_SHIFT);
	/*
	 * Fill in the process table.
	 */
	rts_field = radix__get_tree_size();
	process_tb->prtb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE);
	/*
	 * Fill in the partition table. We are supposed to use the effective
	 * address of the process table here, but our linear mapping also
	 * allows us to use the physical address.
	 */
	register_process_table(__pa(process_tb), 0, PRTB_SIZE_SHIFT - 12);
	pr_info("Process table %p and radix root for kernel: %p\n", process_tb, init_mm.pgd);
	asm volatile("ptesync" : : : "memory");
	asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :
		     "r" (TLBIEL_INVAL_SET_LPID), "r" (0));
	asm volatile("eieio; tlbsync; ptesync" : : : "memory");
}

static void __init radix_init_partition_table(void)
{
	unsigned long rts_field, dw0;

	mmu_partition_table_init();
	rts_field = radix__get_tree_size();
	dw0 = rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE | PATB_HR;
	mmu_partition_table_set_entry(0, dw0, 0);

	pr_info("Initializing Radix MMU\n");
	pr_info("Partition table %p\n", partition_tb);
}

void __init radix_init_native(void)
{
	register_process_table = native_register_process_table;
}

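/*
 * Radix page sizes are advertised in the device-tree as an array of
 * 32-bit cells in "ibm,processor-radix-AP-encodings": the top 3 bits of
 * each cell carry the AP encoding and the low bits the page-size shift.
 * For example, an (illustrative) cell of 0xa0000010 decodes to ap = 0x5,
 * shift = 16, i.e. a 64K page.
 */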
static int __init get_idx_from_shift(unsigned int shift)
{
	int idx = -1;

	switch (shift) {
	case 0xc:
		idx = MMU_PAGE_4K;
		break;
	case 0x10:
		idx = MMU_PAGE_64K;
		break;
	case 0x15:
		idx = MMU_PAGE_2M;
		break;
	case 0x1e:
		idx = MMU_PAGE_1G;
		break;
	}
	return idx;
}

static int __init radix_dt_scan_page_sizes(unsigned long node,
					   const char *uname, int depth,
					   void *data)
{
	int size = 0;
	int shift, idx;
	unsigned int ap;
	const __be32 *prop;
	const char *type = of_get_flat_dt_prop(node, "device_type", NULL);

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	prop = of_get_flat_dt_prop(node, "ibm,processor-radix-AP-encodings", &size);
	if (!prop)
		return 0;

	pr_info("Page sizes from device-tree:\n");
	for (; size >= 4; size -= 4, ++prop) {

		struct mmu_psize_def *def;

		/* top 3 bits are the AP encoding */
		shift = be32_to_cpu(prop[0]) & ~(0xe << 28);
		ap = be32_to_cpu(prop[0]) >> 29;
		pr_info("Page size shift = %d AP=0x%x\n", shift, ap);

		idx = get_idx_from_shift(shift);
		if (idx < 0)
			continue;

		def = &mmu_psize_defs[idx];
		def->shift = shift;
		def->ap = ap;
	}

	cur_cpu_spec->mmu_features &= ~MMU_FTR_NO_SLBIE_B;
	return 1;
}

void __init radix__early_init_devtree(void)
{
	int rc;

	/*
	 * Try to find the available page sizes in the device-tree
	 */
	rc = of_scan_flat_dt(radix_dt_scan_page_sizes, NULL);
	if (rc != 0) /* Found */
		goto found;
	/*
	 * Let's assume we have page 4k and 64k support
	 */
	mmu_psize_defs[MMU_PAGE_4K].shift = 12;
	mmu_psize_defs[MMU_PAGE_4K].ap = 0x0;

	mmu_psize_defs[MMU_PAGE_64K].shift = 16;
	mmu_psize_defs[MMU_PAGE_64K].ap = 0x5;
found:
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	if (mmu_psize_defs[MMU_PAGE_2M].shift) {
		/*
		 * map vmemmap using 2M if available
		 */
		mmu_vmemmap_psize = MMU_PAGE_2M;
	}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
	return;
}

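/*
 * Used on POWER9 DD1: flush both process- and partition-scoped TLB
 * entries, then set the radix bit in HID0 and spin until the hardware
 * reports the new translation mode.
 */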
static void update_hid_for_radix(void)
{
	unsigned long hid0;
	unsigned long rb = 3UL << PPC_BITLSHIFT(53); /* IS = 3 */

	asm volatile("ptesync": : :"memory");
	/* prs = 0, ric = 2, rs = 0, r = 1, is = 3 */
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(1), "i"(0), "i"(2), "r"(0) : "memory");
	/* prs = 1, ric = 2, rs = 0, r = 1, is = 3 */
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(1), "i"(1), "i"(2), "r"(0) : "memory");
	asm volatile("eieio; tlbsync; ptesync; isync; slbia": : :"memory");
	/*
	 * Now switch the HID.
	 */
	hid0 = mfspr(SPRN_HID0);
	hid0 |= HID0_POWER9_RADIX;
	mtspr(SPRN_HID0, hid0);
	asm volatile("isync": : :"memory");

	/* Wait for it to happen */
	while (!(mfspr(SPRN_HID0) & HID0_POWER9_RADIX))
		cpu_relax();
}

static void radix_init_amor(void)
{
	/*
	 * In HV mode, we init AMOR (Authority Mask Override Register) so
	 * that the hypervisor and guests can set up the IAMR (Instruction
	 * Authority Mask Register), enable key 0 and set it to 1.
	 *
	 * AMOR = 0b1100 .... 0000 (Mask for key 0 is 11)
	 */
	mtspr(SPRN_AMOR, (3ul << 62));
}

static void radix_init_iamr(void)
{
	unsigned long iamr;

	/*
	 * The IAMR should be set to 0 on DD1.
	 */
	if (cpu_has_feature(CPU_FTR_POWER9_DD1))
		iamr = 0;
	else
		iamr = (1ul << 62);

	/*
	 * Radix always uses key0 of the IAMR to determine if an access is
	 * allowed. We set bit 0 (IBM bit 1) of key0, to prevent instruction
	 * fetch.
	 */
	mtspr(SPRN_IAMR, iamr);
}

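/*
 * Boot-CPU MMU setup for radix: pick the base and vmemmap page sizes,
 * publish the radix page-table geometry and kernel virtual-address layout,
 * and, when running bare-metal, switch the LPCR to radix host mode and
 * install the partition table before building the kernel page tables.
 */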
void __init radix__early_init_mmu(void)
{
	unsigned long lpcr;

#ifdef CONFIG_PPC_64K_PAGES
	/* PAGE_SIZE mappings */
	mmu_virtual_psize = MMU_PAGE_64K;
#else
	mmu_virtual_psize = MMU_PAGE_4K;
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/* vmemmap mapping */
	mmu_vmemmap_psize = mmu_virtual_psize;
#endif
	/*
	 * initialize page table size
	 */
	__pte_index_size = RADIX_PTE_INDEX_SIZE;
	__pmd_index_size = RADIX_PMD_INDEX_SIZE;
	__pud_index_size = RADIX_PUD_INDEX_SIZE;
	__pgd_index_size = RADIX_PGD_INDEX_SIZE;
	__pmd_cache_index = RADIX_PMD_INDEX_SIZE;
	__pte_table_size = RADIX_PTE_TABLE_SIZE;
	__pmd_table_size = RADIX_PMD_TABLE_SIZE;
	__pud_table_size = RADIX_PUD_TABLE_SIZE;
	__pgd_table_size = RADIX_PGD_TABLE_SIZE;

	__pmd_val_bits = RADIX_PMD_VAL_BITS;
	__pud_val_bits = RADIX_PUD_VAL_BITS;
	__pgd_val_bits = RADIX_PGD_VAL_BITS;

	__kernel_virt_start = RADIX_KERN_VIRT_START;
	__kernel_virt_size = RADIX_KERN_VIRT_SIZE;
	__vmalloc_start = RADIX_VMALLOC_START;
	__vmalloc_end = RADIX_VMALLOC_END;
	vmemmap = (struct page *)RADIX_VMEMMAP_BASE;
	ioremap_bot = IOREMAP_BASE;

#ifdef CONFIG_PCI
	pci_io_base = ISA_IO_BASE;
#endif

	/*
	 * For now radix also uses the same frag size
	 */
	__pte_frag_nr = H_PTE_FRAG_NR;
	__pte_frag_size_shift = H_PTE_FRAG_SIZE_SHIFT;

	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		radix_init_native();
		if (cpu_has_feature(CPU_FTR_POWER9_DD1))
			update_hid_for_radix();
		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
		radix_init_partition_table();
		radix_init_amor();
	} else {
		radix_init_pseries();
	}

	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);

	radix_init_iamr();
	radix_init_pgtable();
}

void radix__early_init_mmu_secondary(void)
{
	unsigned long lpcr;
	/*
	 * update partition table control register and UPRT
	 */
	if (!firmware_has_feature(FW_FEATURE_LPAR)) {

		if (cpu_has_feature(CPU_FTR_POWER9_DD1))
			update_hid_for_radix();

		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);

		mtspr(SPRN_PTCR,
		      __pa(partition_tb) | (PATB_SIZE_SHIFT - 12));
		radix_init_amor();
	}
	radix_init_iamr();
}

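/*
 * Tear down the host radix MMU state: drop UPRT from the LPCR, clear the
 * partition-table base in the PTCR (including the nest MMU's copy) and
 * flush the TLB.
 */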
void radix__mmu_cleanup_all(void)
{
	unsigned long lpcr;

	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr & ~LPCR_UPRT);
		mtspr(SPRN_PTCR, 0);
		powernv_set_nmmu_ptcr(0);
		radix__flush_tlb_all();
	}
}

void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
				       phys_addr_t first_memblock_size)
{
	/*
	 * We don't currently support the first MEMBLOCK not mapping 0
	 * physical on those processors.
	 */
	BUG_ON(first_memblock_base != 0);
	/*
	 * We limit the allocations that depend on ppc64_rma_size
	 * to first_memblock_size. We also clamp it to 1GB to
	 * avoid some funky things such as RTAS bugs.
	 *
	 * On radix config we really don't have a limitation
	 * on real mode access. But keeping it as above works
	 * well enough.
	 */
	ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);
	/*
	 * Finally limit subsequent allocations. We really don't want
	 * to limit the memblock allocations to rma_size. FIXME!! should
	 * we even limit at all ?
	 */
	memblock_set_current_limit(first_memblock_base + first_memblock_size);
}

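/*
 * Memory hot-unplug support: the remove_*_table() helpers below walk the
 * kernel page tables for the range being removed, clear the entries and
 * free any page-table pages that end up completely empty.
 */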
#ifdef CONFIG_MEMORY_HOTPLUG
static void free_pte_table(pte_t *pte_start, pmd_t *pmd)
{
	pte_t *pte;
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte = pte_start + i;
		if (!pte_none(*pte))
			return;
	}

	pte_free_kernel(&init_mm, pte_start);
	pmd_clear(pmd);
}

static void free_pmd_table(pmd_t *pmd_start, pud_t *pud)
{
	pmd_t *pmd;
	int i;

	for (i = 0; i < PTRS_PER_PMD; i++) {
		pmd = pmd_start + i;
		if (!pmd_none(*pmd))
			return;
	}

	pmd_free(&init_mm, pmd_start);
	pud_clear(pud);
}

static void remove_pte_table(pte_t *pte_start, unsigned long addr,
			     unsigned long end)
{
	unsigned long next;
	pte_t *pte;

	pte = pte_start + pte_index(addr);
	for (; addr < end; addr = next, pte++) {
		next = (addr + PAGE_SIZE) & PAGE_MASK;
		if (next > end)
			next = end;

		if (!pte_present(*pte))
			continue;

		if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(next)) {
			/*
			 * The vmemmap_free() and remove_section_mapping()
			 * codepaths call us with aligned addresses.
			 */
			WARN_ONCE(1, "%s: unaligned range\n", __func__);
			continue;
		}

		pte_clear(&init_mm, addr, pte);
	}
}

static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
			     unsigned long end)
{
	unsigned long next;
	pte_t *pte_base;
	pmd_t *pmd;

	pmd = pmd_start + pmd_index(addr);
	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);

		if (!pmd_present(*pmd))
			continue;

		if (pmd_huge(*pmd)) {
			if (!IS_ALIGNED(addr, PMD_SIZE) ||
			    !IS_ALIGNED(next, PMD_SIZE)) {
				WARN_ONCE(1, "%s: unaligned range\n", __func__);
				continue;
			}

			pte_clear(&init_mm, addr, (pte_t *)pmd);
			continue;
		}

		pte_base = (pte_t *)pmd_page_vaddr(*pmd);
		remove_pte_table(pte_base, addr, next);
		free_pte_table(pte_base, pmd);
	}
}

static void remove_pud_table(pud_t *pud_start, unsigned long addr,
			     unsigned long end)
{
	unsigned long next;
	pmd_t *pmd_base;
	pud_t *pud;

	pud = pud_start + pud_index(addr);
	for (; addr < end; addr = next, pud++) {
		next = pud_addr_end(addr, end);

		if (!pud_present(*pud))
			continue;

		if (pud_huge(*pud)) {
			if (!IS_ALIGNED(addr, PUD_SIZE) ||
			    !IS_ALIGNED(next, PUD_SIZE)) {
				WARN_ONCE(1, "%s: unaligned range\n", __func__);
				continue;
			}

			pte_clear(&init_mm, addr, (pte_t *)pud);
			continue;
		}

		pmd_base = (pmd_t *)pud_page_vaddr(*pud);
		remove_pmd_table(pmd_base, addr, next);
		free_pmd_table(pmd_base, pud);
	}
}

static void remove_pagetable(unsigned long start, unsigned long end)
{
	unsigned long addr, next;
	pud_t *pud_base;
	pgd_t *pgd;

	spin_lock(&init_mm.page_table_lock);

	for (addr = start; addr < end; addr = next) {
		next = pgd_addr_end(addr, end);

		pgd = pgd_offset_k(addr);
		if (!pgd_present(*pgd))
			continue;

		if (pgd_huge(*pgd)) {
			if (!IS_ALIGNED(addr, PGDIR_SIZE) ||
			    !IS_ALIGNED(next, PGDIR_SIZE)) {
				WARN_ONCE(1, "%s: unaligned range\n", __func__);
				continue;
			}

			pte_clear(&init_mm, addr, (pte_t *)pgd);
			continue;
		}

		pud_base = (pud_t *)pgd_page_vaddr(*pgd);
		remove_pud_table(pud_base, addr, next);
	}

	spin_unlock(&init_mm.page_table_lock);
	radix__flush_tlb_kernel_range(start, end);
}

int __ref radix__create_section_mapping(unsigned long start, unsigned long end)
{
	return create_physical_mapping(start, end);
}

int radix__remove_section_mapping(unsigned long start, unsigned long end)
{
	remove_pagetable(start, end);
	return 0;
}
#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
int __meminit radix__vmemmap_create_mapping(unsigned long start,
					    unsigned long page_size,
					    unsigned long phys)
{
	/* Create a PTE encoding */
	unsigned long flags = _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_KERNEL_RW;

	BUG_ON(radix__map_kernel_page(start, phys, __pgprot(flags), page_size));
	return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
void radix__vmemmap_remove_mapping(unsigned long start, unsigned long page_size)
{
	remove_pagetable(start, start + page_size);
}
#endif
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

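/*
 * Transparent hugepage (THP) helpers for the radix MMU: update, collapse,
 * deposit/withdraw and clear operations on huge pmds.
 */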
unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
					 pmd_t *pmdp, unsigned long clr,
					 unsigned long set)
{
	unsigned long old;

#ifdef CONFIG_DEBUG_VM
	WARN_ON(!radix__pmd_trans_huge(*pmdp));
	assert_spin_locked(&mm->page_table_lock);
#endif

	old = radix__pte_update(mm, addr, (pte_t *)pmdp, clr, set, 1);
	trace_hugepage_update(addr, old, clr, set);

	return old;
}

pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
				 pmd_t *pmdp)
{
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(radix__pmd_trans_huge(*pmdp));
	/*
	 * khugepaged calls this for normal pmd
	 */
	pmd = *pmdp;
	pmd_clear(pmdp);
	/* FIXME!! Verify whether we need this kick below */
	kick_all_cpus_sync();
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}

/*
 * For us pgtable_t is pte_t *. In order to save the deposited
 * page table, we consider the allocated page table as a list
 * head. On withdraw we need to make sure we zero out the used
 * list_head memory area.
 */
void radix__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				       pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}

pgtable_t radix__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pte_t *ptep;
	pgtable_t pgtable;
	struct list_head *lh;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	ptep = (pte_t *) pgtable;
	*ptep = __pte(0);
	ptep++;
	*ptep = __pte(0);
	return pgtable;
}

pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm,
				     unsigned long addr, pmd_t *pmdp)
{
	pmd_t old_pmd;
	unsigned long old;

	old = radix__pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
	old_pmd = __pmd(old);
	/*
	 * Serialize against find_linux_pte_or_hugepte, which does a lock-less
	 * lookup in page tables with local interrupts disabled. For huge pages
	 * it casts pmd_t to pte_t. Since the format of pte_t is different from
	 * pmd_t, we want to prevent a transition from a pmd pointing to a page
	 * table to a pmd pointing to a huge page (and back) while interrupts
	 * are disabled. We clear the pmd to possibly replace it with a page
	 * table pointer in different code paths, so make sure we wait for the
	 * parallel find_linux_pte_or_hugepte to finish.
	 */
	kick_all_cpus_sync();

	return old_pmd;
}

int radix__has_transparent_hugepage(void)
{
	/* For radix 2M at PMD level means thp */
	if (mmu_psize_defs[MMU_PAGE_2M].shift == PMD_SHIFT)
		return 1;
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */