// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2005, Paul Mackerras, IBM Corporation.
 * Copyright 2009, Benjamin Herrenschmidt, IBM Corporation.
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 */

#include <linux/sched.h>
#include <linux/mm_types.h>

#include <asm/sections.h>

#include <mm/mmu_decl.h>

#define CREATE_TRACE_POINTS
#include <trace/events/thp.h>

#if H_PGTABLE_RANGE > (USER_VSID_RANGE * (TASK_SIZE_USER64 / TASK_CONTEXT_SIZE))
#warning Limited user VSID range means pagetable space is wasted
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * vmemmap is the starting address of the virtual address space where
 * struct pages are allocated for all possible PFNs present on the system
 * including holes and bad memory (hence sparse). These virtual struct
 * pages are stored in sequence in this virtual address space irrespective
 * of whether the corresponding PFN is valid or not. This achieves a
 * constant relationship between the address of a struct page and its PFN.
 *
 * During boot or a memory hotplug operation, when a new memory section is
 * added, physical memory allocation (including hash table bolting) will
 * be performed for the set of struct pages which are part of the memory
 * section. This saves memory by not allocating struct pages for PFNs
 * which are not valid.
 *
 *		----------------------------------------------
 *		| PHYSICAL ALLOCATION OF VIRTUAL STRUCT PAGES|
 *		----------------------------------------------
 *
 *	   f000000000000000                  c000000000000000
 * vmemmap +--------------+                  +--------------+
 *  +      |  page struct | +--------------> |  page struct |
 *  |      +--------------+                  +--------------+
 *  |      |  page struct | +--------------> |  page struct |
 *  |      +--------------+ |                +--------------+
 *  |      |  page struct | +       +------> |  page struct |
 *  |      +--------------+         |        +--------------+
 *  |      |  page struct |         |   +--> |  page struct |
 *  |      +--------------+         |   |    +--------------+
 *  |      |  page struct |         |   |
 *  |      +--------------+         |   |
 *  |      |  page struct |         |   |
 *  |      +--------------+         |   |
 *  |      |  page struct |         |   |
 *  |      +--------------+         |   |
 *  |      |  page struct |         |   |
 *  |      +--------------+         |   |
 *  |      |  page struct |  +-------+   |
 *  |      +--------------+              |
 *  |      |  page struct |  +-----------+
 *  |      +--------------+
 *  |      |  page struct | No mapping
 *  |      +--------------+
 *  |      |  page struct | No mapping
 *  v      +--------------+
 *
 *		-----------------------------------------
 *		| RELATION BETWEEN STRUCT PAGES AND PFNS|
 *		-----------------------------------------
 *
 * vmemmap +--------------+                 +---------------+
 *  +      |  page struct | +-------------> |      PFN      |
 *  |      +--------------+                 +---------------+
 *  |      |  page struct | +-------------> |      PFN      |
 *  |      +--------------+                 +---------------+
 *  |      |  page struct | +-------------> |      PFN      |
 *  |      +--------------+                 +---------------+
 *  |      |  page struct | +-------------> |      PFN      |
 *  |      +--------------+                 +---------------+
 *  |      |              |
 *  |      +--------------+
 *  |      |              |
 *  |      +--------------+
 *  |      |              |
 *  |      +--------------+                 +---------------+
 *  |      |  page struct | +-------------> |      PFN      |
 *  |      +--------------+                 +---------------+
 *  |      |              |
 *  |      +--------------+
 *  |      |              |
 *  |      +--------------+                 +---------------+
 *  |      |  page struct | +-------------> |      PFN      |
 *  |      +--------------+                 +---------------+
 *  |      |  page struct | +-------------> |      PFN      |
 *  v      +--------------+                 +---------------+
 */
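/*
 * Illustrative note (added, not part of the original comment): the
 * "constant relationship" above is plain linear arithmetic, roughly
 *
 *	struct page *pg   = vmemmap + pfn;	// what pfn_to_page() does
 *	unsigned long pfn = pg - vmemmap;	// what page_to_pfn() does
 *
 * Only the physical backing for struct pages of valid PFN ranges is
 * allocated and bolted; the virtual layout stays dense.
 */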
/*
 * On hash-based CPUs, the vmemmap is bolted in the hash table.
 */
int __meminit hash__vmemmap_create_mapping(unsigned long start,
					   unsigned long page_size,
					   unsigned long phys)
{
	int rc;

	if ((start + page_size) >= H_VMEMMAP_END) {
		pr_warn("Outside the supported range\n");
		return -1;
	}

	rc = htab_bolt_mapping(start, start + page_size, phys,
			       pgprot_val(PAGE_KERNEL),
			       mmu_vmemmap_psize, mmu_kernel_ssize);
	if (rc < 0) {
		int rc2 = htab_remove_mapping(start, start + page_size,
					      mmu_vmemmap_psize,
					      mmu_kernel_ssize);
		BUG_ON(rc2 && (rc2 != -ENOENT));
	}
	return rc;
}
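/*
 * Caller sketch (illustrative, assuming the generic powerpc vmemmap code
 * in init_64.c as the caller): vmemmap_populate() allocates a backing
 * block and then asks for it to be mapped, roughly
 *
 *	p = vmemmap_alloc_block(page_size, node);
 *	vmemmap_create_mapping(start, page_size, __pa(p));
 *
 * which on hash MMUs ends up in hash__vmemmap_create_mapping() above.
 */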
#ifdef CONFIG_MEMORY_HOTPLUG
void hash__vmemmap_remove_mapping(unsigned long start,
				  unsigned long page_size)
{
	int rc = htab_remove_mapping(start, start + page_size,
				     mmu_vmemmap_psize,
				     mmu_kernel_ssize);
	BUG_ON((rc < 0) && (rc != -ENOENT));
	WARN_ON(rc == -ENOENT);
}
#endif
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

/*
 * map_kernel_page currently only called by __ioremap
 * map_kernel_page adds an entry to the ioremap page table
 * and adds an entry to the HPT, possibly bolting it
 */
int hash__map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	BUILD_BUG_ON(TASK_SIZE_USER64 > H_PGTABLE_RANGE);
	if (slab_is_available()) {
		pgdp = pgd_offset_k(ea);
		p4dp = p4d_offset(pgdp, ea);
		pudp = pud_alloc(&init_mm, p4dp, ea);
		if (!pudp)
			return -ENOMEM;
		pmdp = pmd_alloc(&init_mm, pudp, ea);
		if (!pmdp)
			return -ENOMEM;
		ptep = pte_alloc_kernel(pmdp, ea);
		if (!ptep)
			return -ENOMEM;
		set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, prot));
	} else {
		/*
		 * If the mm subsystem is not fully up, we cannot create a
		 * linux page table entry for this mapping. Simply bolt an
		 * entry in the hardware page table.
		 */
		if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, pgprot_val(prot),
				      mmu_io_psize, mmu_kernel_ssize)) {
			printk(KERN_ERR "Failed to do bolted mapping IO "
			       "memory at %016lx !\n", pa);
			return -ENOMEM;
		}
	}

	smp_wmb();
	return 0;
}
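/*
 * Usage sketch (illustrative, not from this file): an ioremap() of a
 * device register range ends up here one page at a time, e.g.
 *
 *	hash__map_kernel_page(va, pa & PAGE_MASK,
 *			      pgprot_noncached(PAGE_KERNEL));
 *
 * With slab available this installs a regular kernel PTE; early in boot
 * the translation is instead bolted straight into the hash table.
 */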
#ifdef CONFIG_TRANSPARENT_HUGEPAGE

unsigned long hash__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
					pmd_t *pmdp, unsigned long clr,
					unsigned long set)
{
	__be64 old_be, tmp;
	unsigned long old;

#ifdef CONFIG_DEBUG_VM
	WARN_ON(!hash__pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
	assert_spin_locked(pmd_lockptr(mm, pmdp));
#endif

	__asm__ __volatile__(
	"1:	ldarx	%0,0,%3\n"
	"	and.	%1,%0,%6\n"
	"	bne-	1b\n"
	"	andc	%1,%0,%4\n"
	"	or	%1,%1,%7\n"
	"	stdcx.	%1,0,%3\n"
	"	bne-	1b"
	: "=&r" (old_be), "=&r" (tmp), "=m" (*pmdp)
	: "r" (pmdp), "r" (cpu_to_be64(clr)), "m" (*pmdp),
	  "r" (cpu_to_be64(H_PAGE_BUSY)), "r" (cpu_to_be64(set))
	: "cc" );

	old = be64_to_cpu(old_be);

	trace_hugepage_update(addr, old, clr, set);
	if (old & H_PAGE_HASHPTE)
		hpte_do_hugepage_flush(mm, addr, pmdp, old);
	return old;
}
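/*
 * Illustrative note (added, not from the original source): the
 * ldarx/stdcx. sequence above is an atomic read-modify-write of the
 * big-endian PMD image, roughly
 *
 *	do {
 *		old = *pmdp;			// ldarx, take reservation
 *	} while (old & H_PAGE_BUSY);		// spin while the busy bit is set
 *	*pmdp = (old & ~clr) | set;		// stdcx., retried on conflict
 *
 * hence the cpu_to_be64()/be64_to_cpu() conversions around it.
 */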
pmd_t hash__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
				pmd_t *pmdp)
{
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_trans_huge(*pmdp));
	VM_BUG_ON(pmd_devmap(*pmdp));

	pmd = *pmdp;
	pmd_clear(pmdp);
	/*
	 * Wait for all pending hash_page to finish. This is needed
	 * in case of subpage collapse. When we collapse normal pages
	 * to hugepage, we first clear the pmd, then invalidate all
	 * the PTE entries. The assumption here is that any low level
	 * page fault will see a none pmd and take the slow path that
	 * will wait on mmap_lock. But we could very well be in a
	 * hash_page with local ptep pointer value. Such a hash page
	 * can result in adding new HPTE entries for normal subpages.
	 * That means we could be modifying the page content as we
	 * copy them to a huge page. So wait for parallel hash_page
	 * to finish before invalidating HPTE entries. We can do this
	 * by sending an IPI to all the cpus and executing a dummy
	 * function there.
	 */
	serialize_against_pte_lookup(vma->vm_mm);
	/*
	 * Now invalidate the hpte entries in the range
	 * covered by pmd. This makes sure we take a
	 * fault and will find the pmd as none, which will
	 * result in a major fault which takes mmap_lock and
	 * hence wait for collapse to complete. Without this
	 * the __collapse_huge_page_copy can result in copying
	 * the old content.
	 */
	flush_tlb_pmd_range(vma->vm_mm, &pmd, address);
	return pmd;
}
/*
 * We want to put the pgtable in pmd and use pgtable for tracking
 * the base page size hptes
 */
void hash__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				      pgtable_t pgtable)
{
	pgtable_t *pgtable_slot;

	assert_spin_locked(pmd_lockptr(mm, pmdp));
	/*
	 * we store the pgtable in the second half of PMD
	 */
	pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
	*pgtable_slot = pgtable;
	/*
	 * expose the deposited pgtable to other cpus.
	 * before we set the hugepage PTE at pmd level
	 * hash fault code looks at the deposited pgtable
	 * to store hash index values.
	 */
	smp_wmb();
}
pgtable_t hash__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pgtable_t pgtable;
	pgtable_t *pgtable_slot;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
	pgtable = *pgtable_slot;
	/*
	 * Once we withdraw, mark the entry NULL.
	 */
	*pgtable_slot = NULL;
	/*
	 * We store HPTE information in the deposited PTE fragment.
	 * Zero out the content on withdraw.
	 */
	memset(pgtable, 0, PTE_FRAG_SIZE);
	return pgtable;
}
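/*
 * Pairing sketch (illustrative, hypothetical caller flow): the generic
 * THP code deposits a preallocated PTE page when it installs a hugepage
 * PMD and withdraws it when the PMD is split or torn down:
 *
 *	pgtable_trans_huge_deposit(mm, pmdp, pgtable);
 *	...
 *	pgtable = pgtable_trans_huge_withdraw(mm, pmdp);
 *
 * On hash the deposited fragment doubles as storage for per-subpage hash
 * slot information, which is why it is zeroed again on withdraw.
 */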
/*
 * A linux hugepage PMD was changed and the corresponding hash table entries
 * need to be flushed.
 */
void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
			    pmd_t *pmdp, unsigned long old_pmd)
{
	int ssize;
	unsigned int psize;
	unsigned long vsid;
	unsigned long flags = 0;

	/* get the base page size, vsid and segment size */
#ifdef CONFIG_DEBUG_VM
	psize = get_slice_psize(mm, addr);
	BUG_ON(psize == MMU_PAGE_16M);
#endif
	if (old_pmd & H_PAGE_COMBO)
		psize = MMU_PAGE_4K;
	else
		psize = MMU_PAGE_64K;

	if (!is_kernel_addr(addr)) {
		ssize = user_segment_size(addr);
		vsid = get_user_vsid(&mm->context, addr, ssize);
		WARN_ON(vsid == 0);
	} else {
		vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
		ssize = mmu_kernel_ssize;
	}

	if (mm_is_thread_local(mm))
		flags |= HPTE_LOCAL_UPDATE;

	return flush_hash_hugepage(vsid, addr, pmdp, psize, ssize, flags);
}
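/*
 * Added note: a THP hugepage on hash is backed by base-page-size HPTEs
 * (4K when H_PAGE_COMBO is set, 64K otherwise), never by a single 16M
 * HPTE; the DEBUG_VM check above asserts that the slice base page size
 * here is not 16M.
 */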
pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm,
				    unsigned long addr, pmd_t *pmdp)
{
	pmd_t old_pmd;
	pgtable_t pgtable;
	unsigned long old;
	pgtable_t *pgtable_slot;

	old = pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
	old_pmd = __pmd(old);
	/*
	 * We have pmd == none and we are holding page_table_lock.
	 * So we can safely go and clear the pgtable hash
	 * index info.
	 */
	pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
	pgtable = *pgtable_slot;
	/*
	 * Zero out the old valid and hash index details, since the
	 * hash fault code looks at them.
	 */
	memset(pgtable, 0, PTE_FRAG_SIZE);
	return old_pmd;
}
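/*
 * Added note (hedged): this is the hash backend for pmdp_huge_get_and_clear(),
 * used when a hugepage mapping is torn down. pmd_hugepage_update() with
 * clr = ~0UL both clears the PMD and flushes any hash entries via
 * hpte_do_hugepage_flush(), so only the deposited slot info remains to be
 * scrubbed here.
 */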
int hash__has_transparent_hugepage(void)
{
	if (!mmu_has_feature(MMU_FTR_16M_PAGE))
		return 0;
	/*
	 * We support THP only if PMD_SIZE is 16MB.
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift != PMD_SHIFT)
		return 0;
	/*
	 * We need to make sure that we support 16MB hugepage in a segment
	 * with base page size 64K or 4K. We only enable THP with a PAGE_SIZE
	 * of 64K.
	 */
	/*
	 * If we have 64K HPTE, we will be using that by default
	 */
	if (mmu_psize_defs[MMU_PAGE_64K].shift &&
	    (mmu_psize_defs[MMU_PAGE_64K].penc[MMU_PAGE_16M] == -1))
		return 0;
	/*
	 * Ok we only have 4K HPTE
	 */
	if (mmu_psize_defs[MMU_PAGE_4K].penc[MMU_PAGE_16M] == -1)
		return 0;

	return 1;
}
EXPORT_SYMBOL_GPL(hash__has_transparent_hugepage);
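/*
 * Worked example (added; assumes the usual book3s64 hash geometry): with a
 * 64K PAGE_SIZE, H_PTE_INDEX_SIZE is 8, so PMD_SHIFT = 16 + 8 = 24 and
 * PMD_SIZE = 16MB, matching mmu_psize_defs[MMU_PAGE_16M].shift. With a 4K
 * PAGE_SIZE the PMD only covers 2MB, the shift check above fails, and THP
 * stays disabled; that is the "PAGE_SIZE of 64K" requirement mentioned in
 * the comment.
 */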
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_STRICT_KERNEL_RWX

static bool hash__change_memory_range(unsigned long start, unsigned long end,
				      unsigned long newpp)
{
	unsigned long idx;
	unsigned int step, shift;

	shift = mmu_psize_defs[mmu_linear_psize].shift;
	step = 1 << shift;

	start = ALIGN_DOWN(start, step);
	end = ALIGN(end, step); // aligns up

	if (start >= end)
		return false;

	pr_debug("Changing page protection on range 0x%lx-0x%lx, to 0x%lx, step 0x%x\n",
		 start, end, newpp, step);

	for (idx = start; idx < end; idx += step)
		/* Not sure if we can do much with the return value */
		mmu_hash_ops.hpte_updateboltedpp(newpp, idx, mmu_linear_psize,
						 mmu_kernel_ssize);

	return true;
}
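/*
 * Added example (illustrative numbers): with mmu_linear_psize = MMU_PAGE_16M,
 * step is 0x1000000, so a request for [0xc000000000800000, 0xc000000001200000)
 * is widened to [0xc000000000000000, 0xc000000002000000) before the bolted
 * HPTEs are updated; callers rely on the linker aligning the section
 * boundaries so nothing unintended is swept into the range.
 */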
void hash__mark_rodata_ro(void)
{
	unsigned long start, end;

	start = (unsigned long)_stext;
	end = (unsigned long)__init_begin;

	WARN_ON(!hash__change_memory_range(start, end, PP_RXXX));
}
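/*
 * Added note: the range covered here is _stext..__init_begin, i.e. kernel
 * text plus rodata; PP_RXXX is the read-only protection value applied to
 * those bolted mappings.
 */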
void hash__mark_initmem_nx(void)
{
	unsigned long start, end, pp;

	start = (unsigned long)__init_begin;
	end = (unsigned long)__init_end;

	pp = htab_convert_pte_flags(pgprot_val(PAGE_KERNEL), HPTE_USE_KERNEL_KEY);

	WARN_ON(!hash__change_memory_range(start, end, pp));
}
#endif