/*
 * Copyright IBM Corp. 2006
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

static DEFINE_MUTEX(vmem_mutex);

struct memory_segment {
	struct list_head list;
	unsigned long start;
	unsigned long size;
};

static LIST_HEAD(mem_segs);

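/*
 * Allocate pages for page table structures. Early in boot, before the
 * slab allocator is available, the pages come from the bootmem
 * allocator instead, hence the __ref annotation.
 */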
static void __ref *vmem_alloc_pages(unsigned int order)
{
	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
}

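/*
 * Region-third (pud) and segment (pmd) tables occupy four pages, so an
 * order 2 allocation is made and the whole 16KB area is initialized
 * with empty entries.
 */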
static inline pud_t *vmem_pud_alloc(void)
{
	pud_t *pud;

	pud = vmem_alloc_pages(2);
	if (!pud)
		return NULL;
	clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
	return pud;
}

static inline pmd_t *vmem_pmd_alloc(void)
{
	pmd_t *pmd;

	pmd = vmem_alloc_pages(2);
	if (!pmd)
		return NULL;
	clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
	return pmd;
}

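/*
 * PTE tables come from page_table_alloc() once the slab allocator is
 * up; during early boot they are allocated from bootmem.
 */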
static pte_t __ref *vmem_pte_alloc(unsigned long address)
{
	pte_t *pte;

	if (slab_is_available())
		pte = (pte_t *) page_table_alloc(&init_mm, address);
	else
		pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t));
	if (!pte)
		return NULL;
	clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY,
		    PTRS_PER_PTE * sizeof(pte_t));
	return pte;
}

/*
 * Add a physical memory range to the 1:1 mapping. Where the hardware
 * supports it and the range is suitably aligned, large frames are used
 * instead of individual page table entries.
 */
static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
{
	unsigned long end = start + size;
	unsigned long address = start;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t  pte;
	int ret = -ENOMEM;

	while (address < end) {
		pte = mk_pte_phys(address, __pgprot(ro ? _PAGE_RO : 0));
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, pu_dir);
		}
		pu_dir = pud_offset(pg_dir, address);
#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
		/* Map a whole 2GB region-third frame if EDAT2 is available. */
		if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address &&
		    !(address & ~PUD_MASK) && (address + PUD_SIZE <= end)) {
			pte_val(pte) |= _REGION3_ENTRY_LARGE;
			pte_val(pte) |= _REGION_ENTRY_TYPE_R3;
			pud_val(*pu_dir) = pte_val(pte);
			address += PUD_SIZE;
			continue;
		}
#endif
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}
		pm_dir = pmd_offset(pu_dir, address);
#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
		/* Map a whole 1MB segment frame if EDAT1 is available. */
		if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address &&
		    !(address & ~PMD_MASK) && (address + PMD_SIZE <= end)) {
			pte_val(pte) |= _SEGMENT_ENTRY_LARGE;
			pmd_val(*pm_dir) = pte_val(pte);
			address += PMD_SIZE;
			continue;
		}
#endif
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc(address);
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		}
		pt_dir = pte_offset_kernel(pm_dir, address);
		*pt_dir = pte;
		address += PAGE_SIZE;
	}
	ret = 0;
out:
	flush_tlb_kernel_range(start, end);
	return ret;
}

/*
 * Remove a physical memory range from the 1:1 mapping.
 * Currently only invalidates page table entries.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	unsigned long end = start + size;
	unsigned long address = start;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t  pte;

	pte_val(pte) = _PAGE_TYPE_EMPTY;
	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			address += PGDIR_SIZE;
			continue;
		}
		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			address += PUD_SIZE;
			continue;
		}
		if (pud_large(*pu_dir)) {
			/* Clear the whole large region-third mapping at once. */
			pud_clear(pu_dir);
			address += PUD_SIZE;
			continue;
		}
		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			address += PMD_SIZE;
			continue;
		}
		if (pmd_large(*pm_dir)) {
			/* Clear the whole large segment mapping at once. */
			pmd_clear(pm_dir);
			address += PMD_SIZE;
			continue;
		}
		pt_dir = pte_offset_kernel(pm_dir, address);
		*pt_dir = pte;
		address += PAGE_SIZE;
	}
	flush_tlb_kernel_range(start, end);
}

/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
{
	unsigned long address, start_addr, end_addr;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t  pte;
	int ret = -ENOMEM;

	start_addr = (unsigned long) start;
	end_addr = (unsigned long) (start + nr);

	for (address = start_addr; address < end_addr;) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, pu_dir);
		}

		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}

		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			/* Use 1MB frames for vmemmap if available. We always
			 * use large frames even if they are only partially
			 * used. Otherwise we would also have page tables,
			 * since vmemmap_populate gets called for each section
			 * separately. */
			if (MACHINE_HAS_EDAT1) {
				void *new_page;

				new_page = vmemmap_alloc_block(PMD_SIZE, node);
				if (!new_page)
					goto out;
				pte = mk_pte_phys(__pa(new_page), PAGE_RW);
				pte_val(pte) |= _SEGMENT_ENTRY_LARGE;
				pmd_val(*pm_dir) = pte_val(pte);
				address = (address + PMD_SIZE) & PMD_MASK;
				continue;
			}
			pt_dir = vmem_pte_alloc(address);
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		} else if (pmd_large(*pm_dir)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		if (pte_none(*pt_dir)) {
			unsigned long new_page;

			new_page = __pa(vmem_alloc_pages(0));
			if (!new_page)
				goto out;
			pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
			*pt_dir = pte;
		}
		address += PAGE_SIZE;
	}
	memset(start, 0, nr * sizeof(struct page));
	ret = 0;
out:
	flush_tlb_kernel_range(start_addr, end_addr);
	return ret;
}

/*
 * Add memory segment to the segment list if it doesn't overlap with
 * an already present segment.
 */
static int insert_memory_segment(struct memory_segment *seg)
{
	struct memory_segment *tmp;

	if (seg->start + seg->size > VMEM_MAX_PHYS ||
	    seg->start + seg->size < seg->start)
		return -ERANGE;

	list_for_each_entry(tmp, &mem_segs, list) {
		if (seg->start >= tmp->start + tmp->size)
			continue;
		if (seg->start + seg->size <= tmp->start)
			continue;
		return -ENOSPC;
	}
	list_add(&seg->list, &mem_segs);
	return 0;
}

/*
 * Remove memory segment from the segment list.
 */
static void remove_memory_segment(struct memory_segment *seg)
{
	list_del(&seg->list);
}

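/*
 * Take a segment off the list and invalidate its 1:1 mapping.
 * Called with vmem_mutex held.
 */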
static void __remove_shared_memory(struct memory_segment *seg)
{
	remove_memory_segment(seg);
	vmem_remove_range(seg->start, seg->size);
}

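/*
 * Remove a previously added memory segment. The segment is looked up
 * by an exact start/size match, taken off the segment list and its
 * 1:1 mapping is invalidated.
 */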
int vmem_remove_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);

	ret = -ENOENT;
	list_for_each_entry(seg, &mem_segs, list) {
		if (seg->start == start && seg->size == size)
			break;
	}

	if (seg->start != start || seg->size != size)
		goto out;

	ret = 0;
	__remove_shared_memory(seg);
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

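/*
 * Add a memory segment and create a writable 1:1 mapping for it.
 * If creating the mapping fails the segment is removed again.
 */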
int vmem_add_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);
	ret = -ENOMEM;
	seg = kzalloc(sizeof(*seg), GFP_KERNEL);
	if (!seg)
		goto out;
	seg->start = start;
	seg->size = size;

	ret = insert_memory_segment(seg);
	if (ret)
		goto out_free;

	ret = vmem_add_mem(start, size, 0);
	if (ret)
		goto out_remove;
	goto out;

out_remove:
	__remove_shared_memory(seg);
out_free:
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

/*
 * Map whole physical memory to virtual memory (identity mapping).
 * We reserve enough space in the vmalloc area for vmemmap to hotplug
 * additional memory segments.
 */
void __init vmem_map_init(void)
{
	unsigned long ro_start, ro_end;
	unsigned long start, end;
	int i;

	ro_start = PFN_ALIGN((unsigned long)&_stext);
	ro_end = (unsigned long)&_eshared & PAGE_MASK;
	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
		if (memory_chunk[i].type == CHUNK_CRASHK ||
		    memory_chunk[i].type == CHUNK_OLDMEM)
			continue;
		start = memory_chunk[i].addr;
		end = memory_chunk[i].addr + memory_chunk[i].size;
		/* Map the read-only kernel area between _stext and _eshared
		 * read-only and everything else read-write; chunks that
		 * straddle the boundaries are split. */
		if (start >= ro_end || end <= ro_start)
			vmem_add_mem(start, end - start, 0);
		else if (start >= ro_start && end <= ro_end)
			vmem_add_mem(start, end - start, 1);
		else if (start >= ro_start) {
			vmem_add_mem(start, ro_end - start, 1);
			vmem_add_mem(ro_end, end - ro_end, 0);
		} else if (end < ro_end) {
			vmem_add_mem(start, ro_start - start, 0);
			vmem_add_mem(ro_start, end - ro_start, 1);
		} else {
			vmem_add_mem(start, ro_start - start, 0);
			vmem_add_mem(ro_start, ro_end - ro_start, 1);
			vmem_add_mem(ro_end, end - ro_end, 0);
		}
	}
}

/*
 * Convert memory chunk array to a memory segment list so there is a single
 * list that contains both r/w memory and shared memory segments.
 */
static int __init vmem_convert_memory_chunk(void)
{
	struct memory_segment *seg;
	int i;

	mutex_lock(&vmem_mutex);
	for (i = 0; i < MEMORY_CHUNKS; i++) {
		if (!memory_chunk[i].size)
			continue;
		if (memory_chunk[i].type == CHUNK_CRASHK ||
		    memory_chunk[i].type == CHUNK_OLDMEM)
			continue;
		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
		if (!seg)
			panic("Out of memory...\n");
		seg->start = memory_chunk[i].addr;
		seg->size = memory_chunk[i].size;
		insert_memory_segment(seg);
	}
	mutex_unlock(&vmem_mutex);
	return 0;
}

core_initcall(vmem_convert_memory_chunk);