/*
 * Copyright IBM Corp. 2006
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/memblock.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

static DEFINE_MUTEX(vmem_mutex);

struct memory_segment {
	struct list_head list;
	unsigned long start;
	unsigned long size;
};

static LIST_HEAD(mem_segs);

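/*
 * Allocation helpers: while the slab allocator is not yet available the
 * helpers below fall back to the bootmem allocator, hence the __ref
 * annotations on vmem_alloc_pages() and vmem_pte_alloc().
 */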
static void __ref *vmem_alloc_pages(unsigned int order)
{
	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
}

static inline pud_t *vmem_pud_alloc(void)
{
	pud_t *pud = NULL;

	pud = vmem_alloc_pages(2);
	if (!pud)
		return NULL;
	clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
	return pud;
}

static inline pmd_t *vmem_pmd_alloc(void)
{
	pmd_t *pmd = NULL;

	pmd = vmem_alloc_pages(2);
	if (!pmd)
		return NULL;
	clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
	return pmd;
}

static pte_t __ref *vmem_pte_alloc(unsigned long address)
{
	pte_t *pte;

	if (slab_is_available())
		pte = (pte_t *) page_table_alloc(&init_mm);
	else
		pte = alloc_bootmem_align(PTRS_PER_PTE * sizeof(pte_t),
					  PTRS_PER_PTE * sizeof(pte_t));
	if (!pte)
		return NULL;
	clear_table((unsigned long *) pte, _PAGE_INVALID,
		    PTRS_PER_PTE * sizeof(pte_t));
	return pte;
}

/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
{
	unsigned long end = start + size;
	unsigned long address = start;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	int ret = -ENOMEM;

	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, pu_dir);
		}
		pu_dir = pud_offset(pg_dir, address);
#ifndef CONFIG_DEBUG_PAGEALLOC
		/* Map a 2GB large page (EDAT2) if the range is suitably aligned. */
		if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address &&
		    !(address & ~PUD_MASK) && (address + PUD_SIZE <= end)) {
			pud_val(*pu_dir) = __pa(address) |
				_REGION_ENTRY_TYPE_R3 | _REGION3_ENTRY_LARGE |
				(ro ? _REGION_ENTRY_PROTECT : 0);
			address += PUD_SIZE;
			continue;
		}
#endif
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}
		pm_dir = pmd_offset(pu_dir, address);
#ifndef CONFIG_DEBUG_PAGEALLOC
		/* Map a 1MB large page (EDAT1) if the range is suitably aligned. */
		if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address &&
		    !(address & ~PMD_MASK) && (address + PMD_SIZE <= end)) {
			pmd_val(*pm_dir) = __pa(address) |
				_SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE |
				_SEGMENT_ENTRY_YOUNG |
				(ro ? _SEGMENT_ENTRY_PROTECT : 0);
			address += PMD_SIZE;
			continue;
		}
#endif
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc(address);
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		pte_val(*pt_dir) = __pa(address) |
			pgprot_val(ro ? PAGE_KERNEL_RO : PAGE_KERNEL);
		address += PAGE_SIZE;
	}
	ret = 0;
out:
	return ret;
}

/*
 * Remove a physical memory range from the 1:1 mapping.
 * Currently only invalidates page table entries.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	unsigned long end = start + size;
	unsigned long address = start;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t  pte;

	pte_val(pte) = _PAGE_INVALID;
	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			address += PGDIR_SIZE;
			continue;
		}
		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			address += PUD_SIZE;
			continue;
		}
		if (pud_large(*pu_dir)) {
			pud_clear(pu_dir);
			address += PUD_SIZE;
			continue;
		}
		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			address += PMD_SIZE;
			continue;
		}
		if (pmd_large(*pm_dir)) {
			pmd_clear(pm_dir);
			address += PMD_SIZE;
			continue;
		}
		pt_dir = pte_offset_kernel(pm_dir, address);
		*pt_dir = pte;
		address += PAGE_SIZE;
	}
	flush_tlb_kernel_range(start, end);
}

/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long address = start;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	int ret = -ENOMEM;

	for (address = start; address < end;) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, pu_dir);
		}

		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}

		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			/* Use 1MB frames for vmemmap if available. We always
			 * use large frames even if they are only partially
			 * used.
			 * Otherwise we would have also page tables since
			 * vmemmap_populate gets called for each section
			 * separately. */
			if (MACHINE_HAS_EDAT1) {
				void *new_page;

				new_page = vmemmap_alloc_block(PMD_SIZE, node);
				if (!new_page)
					goto out;
				pmd_val(*pm_dir) = __pa(new_page) |
					_SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE;
				address = (address + PMD_SIZE) & PMD_MASK;
				continue;
			}
			pt_dir = vmem_pte_alloc(address);
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		} else if (pmd_large(*pm_dir)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		if (pte_none(*pt_dir)) {
			void *new_page;

			new_page = vmemmap_alloc_block(PAGE_SIZE, node);
			if (!new_page)
				goto out;
			pte_val(*pt_dir) =
				__pa(new_page) | pgprot_val(PAGE_KERNEL);
		}
		address += PAGE_SIZE;
	}
	ret = 0;
out:
	return ret;
}

void vmemmap_free(unsigned long start, unsigned long end)
{
}

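/*
 * The functions below keep track of memory segments added at run time:
 * each range mapped via vmem_add_mapping() is recorded on the mem_segs
 * list, protected by vmem_mutex.
 */
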
/*
 * Add memory segment to the segment list if it doesn't overlap with
 * an already present segment.
 */
static int insert_memory_segment(struct memory_segment *seg)
{
	struct memory_segment *tmp;

	if (seg->start + seg->size > VMEM_MAX_PHYS ||
	    seg->start + seg->size < seg->start)
		return -ERANGE;

	list_for_each_entry(tmp, &mem_segs, list) {
		if (seg->start >= tmp->start + tmp->size)
			continue;
		if (seg->start + seg->size <= tmp->start)
			continue;
		return -ENOSPC;
	}
	list_add(&seg->list, &mem_segs);
	return 0;
}

/*
 * Remove memory segment from the segment list.
 */
static void remove_memory_segment(struct memory_segment *seg)
{
	list_del(&seg->list);
}

static void __remove_shared_memory(struct memory_segment *seg)
{
	remove_memory_segment(seg);
	vmem_remove_range(seg->start, seg->size);
}

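/*
 * Remove a memory segment that was previously added with vmem_add_mapping()
 * and invalidate its range in the 1:1 mapping.
 */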
int vmem_remove_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);

	ret = -ENOENT;
	list_for_each_entry(seg, &mem_segs, list) {
		if (seg->start == start && seg->size == size)
			break;
	}

	if (seg->start != start || seg->size != size)
		goto out;

	ret = 0;
	__remove_shared_memory(seg);
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

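/*
 * Add a memory segment to the segment list and create the 1:1 mapping
 * for it. Presumably used by the memory hotplug path; a hypothetical
 * caller would roughly do:
 *
 *	rc = vmem_add_mapping(start, size);
 *	if (rc)
 *		return rc;
 */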
int vmem_add_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);
	ret = -ENOMEM;
	seg = kzalloc(sizeof(*seg), GFP_KERNEL);
	if (!seg)
		goto out;
	seg->start = start;
	seg->size = size;

	ret = insert_memory_segment(seg);
	if (ret)
		goto out_free;

	ret = vmem_add_mem(start, size, 0);
	if (ret)
		goto out_remove;
	goto out;

out_remove:
	__remove_shared_memory(seg);
out_free:
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

/*
 * map whole physical memory to virtual memory (identity mapping)
 * we reserve enough space in the vmalloc area for vmemmap to hotplug
 * additional memory segments.
 */
void __init vmem_map_init(void)
{
	unsigned long ro_start, ro_end;
	struct memblock_region *reg;
	phys_addr_t start, end;

	ro_start = PFN_ALIGN((unsigned long)&_stext);
	ro_end = (unsigned long)&_eshared & PAGE_MASK;
	for_each_memblock(memory, reg) {
		start = reg->base;
		end = reg->base + reg->size - 1;
		if (start >= ro_end || end <= ro_start)
			vmem_add_mem(start, end - start, 0);
		else if (start >= ro_start && end <= ro_end)
			vmem_add_mem(start, end - start, 1);
		else if (start >= ro_start) {
			vmem_add_mem(start, ro_end - start, 1);
			vmem_add_mem(ro_end, end - ro_end, 0);
		} else if (end < ro_end) {
			vmem_add_mem(start, ro_start - start, 0);
			vmem_add_mem(ro_start, end - ro_start, 1);
		} else {
			vmem_add_mem(start, ro_start - start, 0);
			vmem_add_mem(ro_start, ro_end - ro_start, 1);
			vmem_add_mem(ro_end, end - ro_end, 0);
		}
	}
}

/*
 * Convert memblock.memory to a memory segment list so there is a single
 * list that contains all memory segments.
 */
static int __init vmem_convert_memory_chunk(void)
{
	struct memblock_region *reg;
	struct memory_segment *seg;

	mutex_lock(&vmem_mutex);
	for_each_memblock(memory, reg) {
		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
		if (!seg)
			panic("Out of memory...\n");
		seg->start = reg->base;
		seg->size = reg->size;
		insert_memory_segment(seg);
	}
	mutex_unlock(&vmem_mutex);
	return 0;
}

core_initcall(vmem_convert_memory_chunk);