/*
 *  Copyright IBM Corp. 2006
 *  Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/memblock.h>
#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

static DEFINE_MUTEX(vmem_mutex);

struct memory_segment {
	struct list_head list;
	unsigned long start;
	unsigned long size;
};

static LIST_HEAD(mem_segs);

static void __ref *vmem_alloc_pages(unsigned int order)
{
	unsigned long size = PAGE_SIZE << order;

	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return alloc_bootmem_align(size, size);
}
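
/*
 * The page table allocators below fall back to the bootmem allocator as
 * long as the slab allocator is not yet available: region and segment
 * tables take four pages (order 2, 16KB with 4KB pages), while a page
 * table takes PTRS_PER_PTE * sizeof(pte_t) bytes (2KB on s390).
 */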

static inline pud_t *vmem_pud_alloc(void)
{
	pud_t *pud = NULL;

	pud = vmem_alloc_pages(2);
	if (!pud)
		return NULL;
	clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
	return pud;
}

pmd_t *vmem_pmd_alloc(void)
{
	pmd_t *pmd = NULL;

	pmd = vmem_alloc_pages(2);
	if (!pmd)
		return NULL;
	clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
	return pmd;
}

pte_t __ref *vmem_pte_alloc(void)
{
	pte_t *pte;

	if (slab_is_available())
		pte = (pte_t *) page_table_alloc(&init_mm);
	else
		pte = alloc_bootmem_align(PTRS_PER_PTE * sizeof(pte_t),
					  PTRS_PER_PTE * sizeof(pte_t));
	if (!pte)
		return NULL;
	clear_table((unsigned long *) pte, _PAGE_INVALID,
		    PTRS_PER_PTE * sizeof(pte_t));
	return pte;
}

/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_mem(unsigned long start, unsigned long size)
{
	unsigned long pages4k, pages1m, pages2g;
	unsigned long end = start + size;
	unsigned long address = start;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	int ret = -ENOMEM;

	pages4k = pages1m = pages2g = 0;
	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, pu_dir);
		}
		pu_dir = pud_offset(pg_dir, address);
		if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address &&
		    !(address & ~PUD_MASK) && (address + PUD_SIZE <= end) &&
		    !debug_pagealloc_enabled()) {
			pud_val(*pu_dir) = address | pgprot_val(REGION3_KERNEL);
			address += PUD_SIZE;
			pages2g++;
			continue;
		}
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}
		pm_dir = pmd_offset(pu_dir, address);
		if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address &&
		    !(address & ~PMD_MASK) && (address + PMD_SIZE <= end) &&
		    !debug_pagealloc_enabled()) {
			pmd_val(*pm_dir) = address | pgprot_val(SEGMENT_KERNEL);
			address += PMD_SIZE;
			pages1m++;
			continue;
		}
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		pte_val(*pt_dir) = address | pgprot_val(PAGE_KERNEL);
		address += PAGE_SIZE;
		pages4k++;
	}
	ret = 0;
out:
	update_page_count(PG_DIRECT_MAP_4K, pages4k);
	update_page_count(PG_DIRECT_MAP_1M, pages1m);
	update_page_count(PG_DIRECT_MAP_2G, pages2g);
	return ret;
}
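
/*
 * Mapping granularity example: a 2GB aligned, 2GB sized chunk, e.g.
 * vmem_add_mem(0x80000000UL, 0x80000000UL), is mapped by a single 2GB
 * region third table entry with MACHINE_HAS_EDAT2, by 2048 1MB segment
 * table entries with only MACHINE_HAS_EDAT1, and by 524288 4KB page
 * table entries otherwise (or whenever debug_pagealloc is enabled).
 */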

/*
 * Remove a physical memory range from the 1:1 mapping.
 * Currently only invalidates page table entries.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	unsigned long pages4k, pages1m, pages2g;
	unsigned long end = start + size;
	unsigned long address = start;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;

	pages4k = pages1m = pages2g = 0;
	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			address += PGDIR_SIZE;
			continue;
		}
		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			address += PUD_SIZE;
			continue;
		}
		if (pud_large(*pu_dir)) {
			pud_clear(pu_dir);
			address += PUD_SIZE;
			pages2g++;
			continue;
		}
		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			address += PMD_SIZE;
			continue;
		}
		if (pmd_large(*pm_dir)) {
			pmd_clear(pm_dir);
			address += PMD_SIZE;
			pages1m++;
			continue;
		}
		pt_dir = pte_offset_kernel(pm_dir, address);
		pte_clear(&init_mm, address, pt_dir);
		address += PAGE_SIZE;
		pages4k++;
	}
	flush_tlb_kernel_range(start, end);
	update_page_count(PG_DIRECT_MAP_4K, -pages4k);
	update_page_count(PG_DIRECT_MAP_1M, -pages1m);
	update_page_count(PG_DIRECT_MAP_2G, -pages2g);
}
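
/*
 * vmem_remove_range() only invalidates the entries (large mappings are
 * cleared at pud/pmd level, everything else at pte level) and flushes
 * the TLB once for the whole range; the page tables themselves are not
 * freed.
 */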

/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long address = start;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	int ret = -ENOMEM;

	for (address = start; address < end;) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, pu_dir);
		}

		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}

		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			/* Use 1MB frames for vmemmap if available. We always
			 * use large frames even if they are only partially
			 * used.
			 * Otherwise we would have also page tables since
			 * vmemmap_populate gets called for each section
			 * separately. */
			if (MACHINE_HAS_EDAT1) {
				void *new_page;

				new_page = vmemmap_alloc_block(PMD_SIZE, node);
				if (!new_page)
					goto out;
				pmd_val(*pm_dir) = __pa(new_page) |
					_SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE;
				address = (address + PMD_SIZE) & PMD_MASK;
				continue;
			}
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		} else if (pmd_large(*pm_dir)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		if (pte_none(*pt_dir)) {
			void *new_page;

			new_page = vmemmap_alloc_block(PAGE_SIZE, node);
			if (!new_page)
				goto out;
			pte_val(*pt_dir) =
				__pa(new_page) | pgprot_val(PAGE_KERNEL);
		}
		address += PAGE_SIZE;
	}
	ret = 0;
out:
	return ret;
}

void vmemmap_free(unsigned long start, unsigned long end)
{
}
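
/*
 * vmemmap_free() is a no-op, so the blocks allocated by
 * vmemmap_populate() are never given back.
 */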

/*
 * Add memory segment to the segment list if it doesn't overlap with
 * an already present segment.
 */
static int insert_memory_segment(struct memory_segment *seg)
{
	struct memory_segment *tmp;

	if (seg->start + seg->size > VMEM_MAX_PHYS ||
	    seg->start + seg->size < seg->start)
		return -ERANGE;

	list_for_each_entry(tmp, &mem_segs, list) {
		if (seg->start >= tmp->start + tmp->size)
			continue;
		if (seg->start + seg->size <= tmp->start)
			continue;
		return -ENOSPC;
	}
	list_add(&seg->list, &mem_segs);
	return 0;
}
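
/*
 * Overlap check example: with an existing segment covering
 * [0x10000000, 0x20000000), a new segment [0x18000000, 0x28000000) is
 * rejected since it is neither entirely below nor entirely above the
 * existing one, while [0x20000000, 0x30000000) is accepted (provided it
 * stays below VMEM_MAX_PHYS).
 */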

/*
 * Remove memory segment from the segment list.
 */
static void remove_memory_segment(struct memory_segment *seg)
{
	list_del(&seg->list);
}

static void __remove_shared_memory(struct memory_segment *seg)
{
	remove_memory_segment(seg);
	vmem_remove_range(seg->start, seg->size);
}

int vmem_remove_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);

	ret = -ENOENT;
	list_for_each_entry(seg, &mem_segs, list) {
		if (seg->start == start && seg->size == size)
			break;
	}

	if (seg->start != start || seg->size != size)
		goto out;

	ret = 0;
	__remove_shared_memory(seg);
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

int vmem_add_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);
	ret = -ENOMEM;
	seg = kzalloc(sizeof(*seg), GFP_KERNEL);
	if (!seg)
		goto out;
	seg->start = start;
	seg->size = size;

	ret = insert_memory_segment(seg);
	if (ret)
		goto out_free;

	ret = vmem_add_mem(start, size);
	if (ret)
		goto out_remove;
	goto out;

out_remove:
	__remove_shared_memory(seg);
out_free:
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}
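
/*
 * vmem_add_mapping() registers the segment first and creates the 1:1
 * mapping second; if vmem_add_mem() fails, the partially created mapping
 * is torn down again and the segment is removed and freed, so no
 * half-added segment stays on the list.
 */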

/*
 * map whole physical memory to virtual memory (identity mapping)
 * we reserve enough space in the vmalloc area for vmemmap to hotplug
 * additional memory segments.
 */
void __init vmem_map_init(void)
{
	unsigned long size = _eshared - _stext;
	struct memblock_region *reg;

	for_each_memblock(memory, reg)
		vmem_add_mem(reg->base, reg->size);
	set_memory_ro((unsigned long)_stext, size >> PAGE_SHIFT);
	pr_info("Write protected kernel read-only data: %luk\n", size >> 10);
}

/*
 * Convert memblock.memory to a memory segment list so there is a single
 * list that contains all memory segments.
 */
static int __init vmem_convert_memory_chunk(void)
{
	struct memblock_region *reg;
	struct memory_segment *seg;

	mutex_lock(&vmem_mutex);
	for_each_memblock(memory, reg) {
		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
		if (!seg)
			panic("Out of memory...\n");
		seg->start = reg->base;
		seg->size = reg->size;
		insert_memory_segment(seg);
	}
	mutex_unlock(&vmem_mutex);
	return 0;
}

core_initcall(vmem_convert_memory_chunk);