/*
 *    Copyright IBM Corp. 2006
 *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
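
/*
 * Serializes runtime updates to the memory segment list and to the 1:1
 * mapping (vmem_add_mapping()/vmem_remove_mapping()).
 */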
static DEFINE_MUTEX(vmem_mutex);
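
/*
 * One contiguous physical memory range tracked on the mem_segs list; both
 * the boot-time memory chunks and segments added later via
 * vmem_add_mapping() end up here.
 */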
struct memory_segment {
        struct list_head list;
        unsigned long start;
        unsigned long size;
};

static LIST_HEAD(mem_segs);
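
/*
 * Allocate 2^order pages: from the buddy allocator once the slab allocator
 * is available, otherwise from the bootmem allocator.
 */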
static void __ref *vmem_alloc_pages(unsigned int order)
{
        if (slab_is_available())
                return (void *)__get_free_pages(GFP_KERNEL, order);
        return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
}
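
/*
 * Allocate and clear a pud (region-third) table; the table spans four
 * pages (2048 entries of 8 bytes each).
 */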
static inline pud_t *vmem_pud_alloc(void)
{
        pud_t *pud;

        pud = vmem_alloc_pages(2);
        if (!pud)
                return NULL;
        clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
        return pud;
}
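
/*
 * Allocate and clear a pmd (segment) table; like the pud table it spans
 * four pages.
 */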
static inline pmd_t *vmem_pmd_alloc(void)
{
        pmd_t *pmd;

        pmd = vmem_alloc_pages(2);
        if (!pmd)
                return NULL;
        clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
        return pmd;
}
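
/*
 * Allocate a page table: from the kernel page table allocator once slab is
 * up, otherwise from bootmem. All entries are cleared to empty; when the
 * machine supports huge pages the change-recording override bit (_PAGE_CO)
 * is set as well.
 */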
static pte_t __ref *vmem_pte_alloc(void)
{
        pte_t *pte;

        if (slab_is_available())
                pte = (pte_t *) page_table_alloc(&init_mm);
        else
                pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t));
        if (!pte)
                return NULL;
        if (MACHINE_HAS_HPAGE)
                clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY | _PAGE_CO,
                            PTRS_PER_PTE * sizeof(pte_t));
        else
                clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY,
                            PTRS_PER_PTE * sizeof(pte_t));
        return pte;
}

/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
{
        unsigned long address;
        pgd_t *pg_dir;
        pud_t *pu_dir;
        pmd_t *pm_dir;
        pte_t *pt_dir;
        pte_t pte;
        int ret = -ENOMEM;

        for (address = start; address < start + size; address += PAGE_SIZE) {
                pg_dir = pgd_offset_k(address);
                if (pgd_none(*pg_dir)) {
                        pu_dir = vmem_pud_alloc();
                        if (!pu_dir)
                                goto out;
                        pgd_populate_kernel(&init_mm, pg_dir, pu_dir);
                }

                pu_dir = pud_offset(pg_dir, address);
                if (pud_none(*pu_dir)) {
                        pm_dir = vmem_pmd_alloc();
                        if (!pm_dir)
                                goto out;
                        pud_populate_kernel(&init_mm, pu_dir, pm_dir);
                }

                pte = mk_pte_phys(address, __pgprot(ro ? _PAGE_RO : 0));
                pm_dir = pmd_offset(pu_dir, address);

                /* Map a whole segment with a large page entry if the address
                 * is segment aligned and enough of the range is left. */
                if (MACHINE_HAS_HPAGE && !(address & ~HPAGE_MASK) &&
                    (address + HPAGE_SIZE <= start + size) &&
                    (address >= HPAGE_SIZE)) {
                        pte_val(pte) |= _SEGMENT_ENTRY_LARGE |
                                        _SEGMENT_ENTRY_CO;
                        pmd_val(*pm_dir) = pte_val(pte);
                        address += HPAGE_SIZE - PAGE_SIZE;
                        continue;
                }

                if (pmd_none(*pm_dir)) {
                        pt_dir = vmem_pte_alloc();
                        if (!pt_dir)
                                goto out;
                        pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
                }

                pt_dir = pte_offset_kernel(pm_dir, address);
                *pt_dir = pte;
        }
        ret = 0;
out:
        flush_tlb_kernel_range(start, start + size);
        return ret;
}

/*
 * Remove a physical memory range from the 1:1 mapping.
 * Currently only invalidates page table entries.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
        unsigned long address;
        pgd_t *pg_dir;
        pud_t *pu_dir;
        pmd_t *pm_dir;
        pte_t *pt_dir;
        pte_t pte;

        pte_val(pte) = _PAGE_TYPE_EMPTY;
        for (address = start; address < start + size; address += PAGE_SIZE) {
                pg_dir = pgd_offset_k(address);
                pu_dir = pud_offset(pg_dir, address);
                if (pud_none(*pu_dir))
                        continue;
                pm_dir = pmd_offset(pu_dir, address);
                if (pmd_none(*pm_dir))
                        continue;

                if (pmd_huge(*pm_dir)) {
                        pmd_clear_kernel(pm_dir);
                        address += HPAGE_SIZE - PAGE_SIZE;
                        continue;
                }

                pt_dir = pte_offset_kernel(pm_dir, address);
                *pt_dir = pte;
        }
        flush_tlb_kernel_range(start, start + size);
}

/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
{
        unsigned long address, start_addr, end_addr;
        pgd_t *pg_dir;
        pud_t *pu_dir;
        pmd_t *pm_dir;
        pte_t *pt_dir;
        pte_t pte;
        int ret = -ENOMEM;

        start_addr = (unsigned long) start;
        end_addr = (unsigned long) (start + nr);

        for (address = start_addr; address < end_addr; address += PAGE_SIZE) {
                pg_dir = pgd_offset_k(address);
                if (pgd_none(*pg_dir)) {
                        pu_dir = vmem_pud_alloc();
                        if (!pu_dir)
                                goto out;
                        pgd_populate_kernel(&init_mm, pg_dir, pu_dir);
                }

                pu_dir = pud_offset(pg_dir, address);
                if (pud_none(*pu_dir)) {
                        pm_dir = vmem_pmd_alloc();
                        if (!pm_dir)
                                goto out;
                        pud_populate_kernel(&init_mm, pu_dir, pm_dir);
                }

                pm_dir = pmd_offset(pu_dir, address);
                if (pmd_none(*pm_dir)) {
                        pt_dir = vmem_pte_alloc();
                        if (!pt_dir)
                                goto out;
                        pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
                }

                pt_dir = pte_offset_kernel(pm_dir, address);
                if (pte_none(*pt_dir)) {
                        unsigned long new_page;

                        /* Allocate a page to back this part of the virtual
                         * mem_map array. */
                        new_page = __pa(vmem_alloc_pages(0));
                        if (!new_page)
                                goto out;
                        pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
                        *pt_dir = pte;
                }
        }
        memset(start, 0, nr * sizeof(struct page));
        ret = 0;
out:
        flush_tlb_kernel_range(start_addr, end_addr);
        return ret;
}

/*
 * Add memory segment to the segment list if it doesn't overlap with
 * an already present segment.
 */
static int insert_memory_segment(struct memory_segment *seg)
{
        struct memory_segment *tmp;

        if (seg->start + seg->size > VMEM_MAX_PHYS ||
            seg->start + seg->size < seg->start)
                return -ERANGE;

        list_for_each_entry(tmp, &mem_segs, list) {
                if (seg->start >= tmp->start + tmp->size)
                        continue;
                if (seg->start + seg->size <= tmp->start)
                        continue;
                return -ENOSPC;
        }
        list_add(&seg->list, &mem_segs);
        return 0;
}

/*
 * Remove memory segment from the segment list.
 */
static void remove_memory_segment(struct memory_segment *seg)
{
        list_del(&seg->list);
}
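
/*
 * Unlink a segment and invalidate its page table entries in the 1:1
 * mapping; the caller must hold vmem_mutex.
 */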
static void __remove_shared_memory(struct memory_segment *seg)
{
        remove_memory_segment(seg);
        vmem_remove_range(seg->start, seg->size);
}
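
/*
 * Remove a previously added memory segment that exactly matches start and
 * size, and invalidate its mapping.
 */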
int vmem_remove_mapping(unsigned long start, unsigned long size)
{
        struct memory_segment *seg;
        int ret;

        mutex_lock(&vmem_mutex);

        ret = -ENOENT;
        list_for_each_entry(seg, &mem_segs, list) {
                if (seg->start == start && seg->size == size)
                        break;
        }

        if (seg->start != start || seg->size != size)
                goto out;

        ret = 0;
        __remove_shared_memory(seg);
        kfree(seg);
out:
        mutex_unlock(&vmem_mutex);
        return ret;
}
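
/*
 * Add a new memory segment, e.g. for a shared memory segment, and create a
 * writable 1:1 mapping for it.
 */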
int vmem_add_mapping(unsigned long start, unsigned long size)
{
        struct memory_segment *seg;
        int ret;

        mutex_lock(&vmem_mutex);
        ret = -ENOMEM;
        seg = kzalloc(sizeof(*seg), GFP_KERNEL);
        if (!seg)
                goto out;
        seg->start = start;
        seg->size = size;

        ret = insert_memory_segment(seg);
        if (ret)
                goto out_free;

        ret = vmem_add_mem(start, size, 0);
        if (ret)
                goto out_remove;
        goto out;

out_remove:
        __remove_shared_memory(seg);
out_free:
        kfree(seg);
out:
        mutex_unlock(&vmem_mutex);
        return ret;
}

/*
 * map whole physical memory to virtual memory (identity mapping)
 * we reserve enough space in the vmalloc area for vmemmap to hotplug
 * additional memory segments.
 */
void __init vmem_map_init(void)
{
        unsigned long ro_start, ro_end;
        unsigned long start, end;
        int i;

        spin_lock_init(&init_mm.context.list_lock);
        INIT_LIST_HEAD(&init_mm.context.crst_list);
        INIT_LIST_HEAD(&init_mm.context.pgtable_list);
        init_mm.context.noexec = 0;
        /* The range from _stext to _eshared is mapped read-only. */
        ro_start = ((unsigned long)&_stext) & PAGE_MASK;
        ro_end = PFN_ALIGN((unsigned long)&_eshared);
        for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
                start = memory_chunk[i].addr;
                end = memory_chunk[i].addr + memory_chunk[i].size;
                if (start >= ro_end || end <= ro_start)
                        vmem_add_mem(start, end - start, 0);
                else if (start >= ro_start && end <= ro_end)
                        vmem_add_mem(start, end - start, 1);
                else if (start >= ro_start) {
                        vmem_add_mem(start, ro_end - start, 1);
                        vmem_add_mem(ro_end, end - ro_end, 0);
                } else if (end < ro_end) {
                        vmem_add_mem(start, ro_start - start, 0);
                        vmem_add_mem(ro_start, end - ro_start, 1);
                } else {
                        vmem_add_mem(start, ro_start - start, 0);
                        vmem_add_mem(ro_start, ro_end - ro_start, 1);
                        vmem_add_mem(ro_end, end - ro_end, 0);
                }
        }
}

/*
 * Convert memory chunk array to a memory segment list so there is a single
 * list that contains both r/w memory and shared memory segments.
 */
static int __init vmem_convert_memory_chunk(void)
{
        struct memory_segment *seg;
        int i;

        mutex_lock(&vmem_mutex);
        for (i = 0; i < MEMORY_CHUNKS; i++) {
                if (!memory_chunk[i].size)
                        continue;
                seg = kzalloc(sizeof(*seg), GFP_KERNEL);
                if (!seg)
                        panic("Out of memory...\n");
                seg->start = memory_chunk[i].addr;
                seg->size = memory_chunk[i].size;
                insert_memory_segment(seg);
        }
        mutex_unlock(&vmem_mutex);
        return 0;
}
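
/* Build the initial segment list from the boot memory chunks. */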
core_initcall(vmem_convert_memory_chunk);