// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2006
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/memblock.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/set_memory.h>

static DEFINE_MUTEX(vmem_mutex);

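/*
 * One contiguous physical memory range tracked in the mem_segs list.
 * Entries are created for the boot memory layout and for ranges added
 * later via vmem_add_mapping().
 */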
struct memory_segment {
	struct list_head list;
	unsigned long start;
	unsigned long size;
};

static LIST_HEAD(mem_segs);

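/*
 * Allocate 2^order pages for page table structures: from the page
 * allocator once the slab allocator is available, from memblock during
 * early boot.
 */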
static void __ref *vmem_alloc_pages(unsigned int order)
{
	unsigned long size = PAGE_SIZE << order;

	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return (void *) memblock_phys_alloc(size, size);
}

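/*
 * Allocate a region or segment table and initialize all of its
 * entries with the given empty entry value.
 */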
void *vmem_crst_alloc(unsigned long val)
{
	unsigned long *table;

	table = vmem_alloc_pages(CRST_ALLOC_ORDER);
	if (table)
		crst_table_init(table, val);
	return table;
}

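/*
 * Allocate a page table and mark all of its entries as invalid.
 */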
pte_t __ref *vmem_pte_alloc(void)
{
	unsigned long size = PTRS_PER_PTE * sizeof(pte_t);
	pte_t *pte;

	if (slab_is_available())
		pte = (pte_t *) page_table_alloc(&init_mm);
	else
		pte = (pte_t *) memblock_phys_alloc(size, size);
	if (!pte)
		return NULL;
	memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
	return pte;
}

/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_mem(unsigned long start, unsigned long size)
{
	unsigned long pgt_prot, sgt_prot, r3_prot;
	unsigned long pages4k, pages1m, pages2g;
	unsigned long end = start + size;
	unsigned long address = start;
	pgd_t *pg_dir;
	p4d_t *p4_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	int ret = -ENOMEM;

	pgt_prot = pgprot_val(PAGE_KERNEL);
	sgt_prot = pgprot_val(SEGMENT_KERNEL);
	r3_prot = pgprot_val(REGION3_KERNEL);
	if (!MACHINE_HAS_NX) {
		pgt_prot &= ~_PAGE_NOEXEC;
		sgt_prot &= ~_SEGMENT_ENTRY_NOEXEC;
		r3_prot &= ~_REGION_ENTRY_NOEXEC;
	}
	pages4k = pages1m = pages2g = 0;
	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			p4_dir = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
			if (!p4_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, p4_dir);
		}
		p4_dir = p4d_offset(pg_dir, address);
		if (p4d_none(*p4_dir)) {
			pu_dir = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
			if (!pu_dir)
				goto out;
			p4d_populate(&init_mm, p4_dir, pu_dir);
		}
		pu_dir = pud_offset(p4_dir, address);
		/* Use a 2GB region frame if EDAT2 is available. */
		if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address &&
		    !(address & ~PUD_MASK) && (address + PUD_SIZE <= end) &&
		     !debug_pagealloc_enabled()) {
			pud_val(*pu_dir) = address | r3_prot;
			address += PUD_SIZE;
			pages2g++;
			continue;
		}
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}
		pm_dir = pmd_offset(pu_dir, address);
		/* Use a 1MB segment frame if EDAT1 is available. */
		if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address &&
		    !(address & ~PMD_MASK) && (address + PMD_SIZE <= end) &&
		    !debug_pagealloc_enabled()) {
			pmd_val(*pm_dir) = address | sgt_prot;
			address += PMD_SIZE;
			pages1m++;
			continue;
		}
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		pte_val(*pt_dir) = address | pgt_prot;
		address += PAGE_SIZE;
		pages4k++;
	}
	ret = 0;
out:
	update_page_count(PG_DIRECT_MAP_4K, pages4k);
	update_page_count(PG_DIRECT_MAP_1M, pages1m);
	update_page_count(PG_DIRECT_MAP_2G, pages2g);
	return ret;
}

/*
 * Remove a physical memory range from the 1:1 mapping.
 * Currently only invalidates page table entries.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	unsigned long pages4k, pages1m, pages2g;
	unsigned long end = start + size;
	unsigned long address = start;
	pgd_t *pg_dir;
	p4d_t *p4_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;

	pages4k = pages1m = pages2g = 0;
	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			address += PGDIR_SIZE;
			continue;
		}
		p4_dir = p4d_offset(pg_dir, address);
		if (p4d_none(*p4_dir)) {
			address += P4D_SIZE;
			continue;
		}
		pu_dir = pud_offset(p4_dir, address);
		if (pud_none(*pu_dir)) {
			address += PUD_SIZE;
			continue;
		}
		if (pud_large(*pu_dir)) {
			pud_clear(pu_dir);
			address += PUD_SIZE;
			pages2g++;
			continue;
		}
		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			address += PMD_SIZE;
			continue;
		}
		if (pmd_large(*pm_dir)) {
			pmd_clear(pm_dir);
			address += PMD_SIZE;
			pages1m++;
			continue;
		}
		pt_dir = pte_offset_kernel(pm_dir, address);
		pte_clear(&init_mm, address, pt_dir);
		address += PAGE_SIZE;
		pages4k++;
	}
	flush_tlb_kernel_range(start, end);
	update_page_count(PG_DIRECT_MAP_4K, -pages4k);
	update_page_count(PG_DIRECT_MAP_1M, -pages1m);
	update_page_count(PG_DIRECT_MAP_2G, -pages2g);
}

/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
			       struct vmem_altmap *altmap)
{
	unsigned long pgt_prot, sgt_prot;
	unsigned long address = start;
	pgd_t *pg_dir;
	p4d_t *p4_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	int ret = -ENOMEM;

	pgt_prot = pgprot_val(PAGE_KERNEL);
	sgt_prot = pgprot_val(SEGMENT_KERNEL);
	if (!MACHINE_HAS_NX) {
		pgt_prot &= ~_PAGE_NOEXEC;
		sgt_prot &= ~_SEGMENT_ENTRY_NOEXEC;
	}
	for (address = start; address < end;) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			p4_dir = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
			if (!p4_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, p4_dir);
		}

		p4_dir = p4d_offset(pg_dir, address);
		if (p4d_none(*p4_dir)) {
			pu_dir = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
			if (!pu_dir)
				goto out;
			p4d_populate(&init_mm, p4_dir, pu_dir);
		}

		pu_dir = pud_offset(p4_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}

		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			/* Use 1MB frames for vmemmap if available. We
			 * always use large frames even if they are only
			 * partially used. Otherwise we would have also
			 * page tables since vmemmap_populate gets called
			 * for each section separately. */
			if (MACHINE_HAS_EDAT1) {
				void *new_page;

				new_page = vmemmap_alloc_block(PMD_SIZE, node);
				if (!new_page)
					goto out;
				pmd_val(*pm_dir) = __pa(new_page) | sgt_prot;
				address = (address + PMD_SIZE) & PMD_MASK;
				continue;
			}
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		} else if (pmd_large(*pm_dir)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		if (pte_none(*pt_dir)) {
			void *new_page;

			new_page = vmemmap_alloc_block(PAGE_SIZE, node);
			if (!new_page)
				goto out;
			pte_val(*pt_dir) = __pa(new_page) | pgt_prot;
		}
		address += PAGE_SIZE;
	}
	ret = 0;
out:
	return ret;
}

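/*
 * vmemmap_free() is a no-op here: the backing storage of the virtual
 * memory map is not freed.
 */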
void vmemmap_free(unsigned long start, unsigned long end,
		  struct vmem_altmap *altmap)
{
}

/*
 * Add memory segment to the segment list if it doesn't overlap with
 * an already present segment.
 */
static int insert_memory_segment(struct memory_segment *seg)
{
	struct memory_segment *tmp;

	if (seg->start + seg->size > VMEM_MAX_PHYS ||
	    seg->start + seg->size < seg->start)
		return -ERANGE;

	list_for_each_entry(tmp, &mem_segs, list) {
		if (seg->start >= tmp->start + tmp->size)
			continue;
		if (seg->start + seg->size <= tmp->start)
			continue;
		return -ENOSPC;
	}
	list_add(&seg->list, &mem_segs);
	return 0;
}

/*
 * Remove memory segment from the segment list.
 */
static void remove_memory_segment(struct memory_segment *seg)
{
	list_del(&seg->list);
}

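/*
 * Unlink a segment and invalidate its pages in the 1:1 mapping.
 * Must be called with vmem_mutex held.
 */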
static void __remove_shared_memory(struct memory_segment *seg)
{
	remove_memory_segment(seg);
	vmem_remove_range(seg->start, seg->size);
}

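/*
 * Remove a previously added memory segment from the 1:1 mapping.
 * The range must match a registered segment exactly in start and size.
 */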
int vmem_remove_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);

	ret = -ENOENT;
	list_for_each_entry(seg, &mem_segs, list) {
		if (seg->start == start && seg->size == size)
			break;
	}

	if (seg->start != start || seg->size != size)
		goto out;

	ret = 0;
	__remove_shared_memory(seg);
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

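/*
 * Create a 1:1 mapping for a new memory segment, e.g. when standby
 * memory is brought online. The segment is first registered in the
 * segment list (which rejects overlapping ranges), then mapped; on
 * failure both steps are rolled back. A caller would typically do
 * something like:
 *
 *	rc = vmem_add_mapping(start, size);
 *	if (rc)
 *		return rc;
 */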
int vmem_add_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);
	ret = -ENOMEM;
	seg = kzalloc(sizeof(*seg), GFP_KERNEL);
	if (!seg)
		goto out;
	seg->start = start;
	seg->size = size;

	ret = insert_memory_segment(seg);
	if (ret)
		goto out_free;

	ret = vmem_add_mem(start, size);
	if (ret)
		goto out_remove;
	goto out;

out_remove:
	__remove_shared_memory(seg);
out_free:
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

/*
 * Map the whole physical memory to virtual memory (identity mapping).
 * We reserve enough space in the vmalloc area for the vmemmap to hotplug
 * additional memory segments.
 */
void __init vmem_map_init(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg)
		vmem_add_mem(reg->base, reg->size);
	__set_memory((unsigned long)_stext,
		     (unsigned long)(_etext - _stext) >> PAGE_SHIFT,
		     SET_MEMORY_RO | SET_MEMORY_X);
	__set_memory((unsigned long)_etext,
		     (unsigned long)(__end_rodata - _etext) >> PAGE_SHIFT,
		     SET_MEMORY_RO);
	__set_memory((unsigned long)_sinittext,
		     (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
		     SET_MEMORY_RO | SET_MEMORY_X);
	__set_memory(__stext_dma, (__etext_dma - __stext_dma) >> PAGE_SHIFT,
		     SET_MEMORY_RO | SET_MEMORY_X);
	pr_info("Write protected kernel read-only data: %luk\n",
		(unsigned long)(__end_rodata - _stext) >> 10);
}

/*
 * Convert memblock.memory to a memory segment list so there is a single
 * list that contains all memory segments.
 */
static int __init vmem_convert_memory_chunk(void)
{
	struct memblock_region *reg;
	struct memory_segment *seg;

	mutex_lock(&vmem_mutex);
	for_each_memblock(memory, reg) {
		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
		if (!seg)
			panic("Out of memory...\n");
		seg->start = reg->base;
		seg->size = reg->size;
		insert_memory_segment(seg);
	}
	mutex_unlock(&vmem_mutex);
	return 0;
}

core_initcall(vmem_convert_memory_chunk);