#include <linux/initrd.h>
#include <linux/ioport.h>
#include <linux/swap.h>
#include <linux/memblock.h>

#include <asm/cacheflush.h>
#include <asm/page_types.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/system.h>
#include <asm/tlbflush.h>
#include <asm/proto.h>
unsigned long __initdata pgt_buf_start;
unsigned long __meminitdata pgt_buf_end;
unsigned long __meminitdata pgt_buf_top;
int direct_gbpages
#ifdef CONFIG_DIRECT_GBPAGES
				= 1
#endif
;
static unsigned long __init find_early_fixmap_space(void)
{
	unsigned long size = 0;
	int kmap_begin_pmd_idx, kmap_end_pmd_idx;
	int fixmap_begin_pmd_idx, fixmap_end_pmd_idx;
	int btmap_begin_pmd_idx;

	fixmap_begin_pmd_idx =
		__fix_to_virt(__end_of_fixed_addresses - 1) >> PMD_SHIFT;
	/*
	 * fixmap_end_pmd_idx is the end of the fixmap minus the PMD that
	 * has been defined in the data section by head_32.S (see
	 * initial_pg_fixmap there).
	 * Note: This is similar to what early_ioremap_page_table_range_init
	 * does except that the "end" has PMD_SIZE expunged as per the
	 * previous comment.
	 */
	fixmap_end_pmd_idx = (FIXADDR_TOP - 1) >> PMD_SHIFT;
	btmap_begin_pmd_idx = __fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT;
	kmap_begin_pmd_idx = __fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT;
	kmap_end_pmd_idx = __fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT;

	size = fixmap_end_pmd_idx - fixmap_begin_pmd_idx;
	/*
	 * early_ioremap_init has already allocated a PMD at
	 * FIX_BTMAP_BEGIN.
	 */
	if (btmap_begin_pmd_idx < fixmap_end_pmd_idx)
		size--;

	/*
	 * see page_table_kmap_check: if the kmap spans multiple PMDs, make
	 * sure the pte pages are allocated contiguously. It might need up
	 * to two additional pte pages to replace the page declared by
	 * head_32.S and the one allocated by early_ioremap_init, if they
	 * are even partially used for the kmap.
	 */
	if (kmap_begin_pmd_idx != kmap_end_pmd_idx) {
		if (kmap_end_pmd_idx == fixmap_end_pmd_idx)
			size++;
		if (btmap_begin_pmd_idx >= kmap_begin_pmd_idx &&
				btmap_begin_pmd_idx <= kmap_end_pmd_idx)
			size++;
	}

	return (size * PMD_SIZE + PAGE_SIZE - 1) >> PAGE_SHIFT;
}
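/*
 * Illustrative note (added for clarity, not in the original source): the
 * value returned above is a count of 4KB pte slots, i.e. "size" PMD-sized
 * stretches of fixmap/kmap address space converted to pages.  Assuming
 * 2MB PMDs and 4KB pages, every PMD still unaccounted for contributes
 * PMD_SIZE / PAGE_SIZE = 512 entries to the "ptes" total that
 * find_early_table_space() below turns into pte-page allocations.
 */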
static void __init find_early_table_space(unsigned long start,
					  unsigned long end,
					  int use_pse, int use_gbpages)
{
	unsigned long puds = 0, pmds = 0, ptes = 0, tables = 0, good_end = end,
		      pud_mapped = 0, pmd_mapped = 0, size = end - start;
	phys_addr_t base;

	pud_mapped = DIV_ROUND_UP(PFN_PHYS(max_pfn_mapped),
				  (PUD_SIZE * PTRS_PER_PUD));
	pud_mapped *= (PUD_SIZE * PTRS_PER_PUD);
	pmd_mapped = DIV_ROUND_UP(PFN_PHYS(max_pfn_mapped),
				  (PMD_SIZE * PTRS_PER_PMD));
	pmd_mapped *= (PMD_SIZE * PTRS_PER_PMD);

	/*
	 * On x86_64 do not limit the size we need to cover with 4KB pages
	 * depending on the initial allocation because head_64.S always uses
	 * 2MB pages.
	 */
#ifdef CONFIG_X86_32
	if (start < PFN_PHYS(max_pfn_mapped)) {
		if (PFN_PHYS(max_pfn_mapped) < end)
			size -= PFN_PHYS(max_pfn_mapped) - start;
		else
			size = 0;
	}
#endif

#ifndef __PAGETABLE_PUD_FOLDED
	if (end > pud_mapped) {
		if (start < pud_mapped)
			puds = (end - pud_mapped + PUD_SIZE - 1) >> PUD_SHIFT;
		else
			puds = (end - start + PUD_SIZE - 1) >> PUD_SHIFT;
		tables += roundup(puds * sizeof(pud_t), PAGE_SIZE);
	}
#endif

	if (use_gbpages) {
		unsigned long extra;

		extra = end - ((end >> PUD_SHIFT) << PUD_SHIFT);
		pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT;
	}
#ifndef __PAGETABLE_PMD_FOLDED
	else if (end > pmd_mapped) {
		if (start < pmd_mapped)
			pmds = (end - pmd_mapped + PMD_SIZE - 1) >> PMD_SHIFT;
		else
			pmds = (end - start + PMD_SIZE - 1) >> PMD_SHIFT;
	}
#endif
	tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);

	if (use_pse) {
		unsigned long extra;

		extra = end - ((end >> PMD_SHIFT) << PMD_SHIFT);
		ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
	} else
		ptes = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	ptes += find_early_fixmap_space();

	tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);

#ifdef CONFIG_X86_32
	good_end = max_pfn_mapped << PAGE_SHIFT;
#endif

	base = memblock_find_in_range(0x00, good_end, tables, PAGE_SIZE);
	if (base == MEMBLOCK_ERROR)
		panic("Cannot find space for the kernel page tables");

	pgt_buf_start = base >> PAGE_SHIFT;
	pgt_buf_end = pgt_buf_start;
	pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT);

	printk(KERN_DEBUG "kernel direct mapping tables up to %lx @ %lx-%lx\n",
		end, pgt_buf_start << PAGE_SHIFT, pgt_buf_top << PAGE_SHIFT);

	if (pgt_buf_top > pgt_buf_start)
		memblock_x86_reserve_range(pgt_buf_start << PAGE_SHIFT,
				pgt_buf_top << PAGE_SHIFT, "PGTABLE");
}
struct map_range {
	unsigned long start;
	unsigned long end;
	unsigned page_size_mask;
};

#ifdef CONFIG_X86_32
#define NR_RANGE_MR 3
#else /* CONFIG_X86_64 */
#define NR_RANGE_MR 5
#endif
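/*
 * Note (added for clarity, not in the original source): the limits above
 * match how init_memory_mapping() splits a region below.  On 32-bit the
 * split is at most head (4k) / middle (2M) / tail (4k) = 3 ranges; on
 * 64-bit a 1G middle range and an extra 2M-aligned tail are possible as
 * well, giving up to 5 ranges.
 */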
static int __meminit save_mr(struct map_range *mr, int nr_range,
			     unsigned long start_pfn, unsigned long end_pfn,
			     unsigned long page_size_mask)
{
	if (start_pfn < end_pfn) {
		if (nr_range >= NR_RANGE_MR)
			panic("run out of range for init_memory_mapping\n");
		mr[nr_range].start = start_pfn<<PAGE_SHIFT;
		mr[nr_range].end   = end_pfn<<PAGE_SHIFT;
		mr[nr_range].page_size_mask = page_size_mask;
		nr_range++;
	}

	return nr_range;
}
/*
 * Setup the direct mapping of the physical memory at PAGE_OFFSET.
 * This runs before bootmem is initialized and gets pages directly from
 * the physical memory. To access them they are temporarily mapped.
 */
unsigned long __init_refok init_memory_mapping(unsigned long start,
					       unsigned long end)
{
	unsigned long page_size_mask = 0;
	unsigned long start_pfn, end_pfn;
	unsigned long ret = 0;
	unsigned long pos;

	struct map_range mr[NR_RANGE_MR];
	int nr_range, i;
	int use_pse, use_gbpages;

	printk(KERN_INFO "init_memory_mapping: %016lx-%016lx\n", start, end);
#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK)
	/*
	 * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
	 * This will simplify cpa(), which otherwise needs to support splitting
	 * large pages into small in interrupt context, etc.
	 */
	use_pse = use_gbpages = 0;
#else
	use_pse = cpu_has_pse;
	use_gbpages = direct_gbpages;
#endif
	/* Enable PSE if available */
	if (cpu_has_pse)
		set_in_cr4(X86_CR4_PSE);

	/* Enable PGE if available */
	if (cpu_has_pge) {
		set_in_cr4(X86_CR4_PGE);
		__supported_pte_mask |= _PAGE_GLOBAL;
	}

	if (use_gbpages)
		page_size_mask |= 1 << PG_LEVEL_1G;
	if (use_pse)
		page_size_mask |= 1 << PG_LEVEL_2M;

	memset(mr, 0, sizeof(mr));
	nr_range = 0;
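	/*
	 * Overview (added for clarity, not in the original source): the code
	 * below splits [start, end) into up to NR_RANGE_MR map_range entries:
	 * a 4k head up to the first 2M/4M boundary, a middle section mapped
	 * with 2M pages (and, on 64-bit, a 1G-page section with its own
	 * 2M-aligned tail), and a final 4k tail.  Each piece is recorded with
	 * save_mr() and adjacent ranges with the same page size are merged
	 * afterwards.
	 */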
	/* head if not big page alignment ? */
	start_pfn = start >> PAGE_SHIFT;
	pos = start_pfn << PAGE_SHIFT;
#ifdef CONFIG_X86_32
	/*
	 * Don't use a large page for the first 2/4MB of memory
	 * because there are often fixed size MTRRs in there
	 * and overlapping MTRRs into large pages can cause
	 * slowdowns, and can mess up systems.
	 */
	if (pos == 0)
		end_pfn = 1<<(PMD_SHIFT - PAGE_SHIFT);
	else
		end_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
				 << (PMD_SHIFT - PAGE_SHIFT);
#else /* CONFIG_X86_64 */
	end_pfn = ((pos + (PMD_SIZE - 1)) >> PMD_SHIFT)
			<< (PMD_SHIFT - PAGE_SHIFT);
#endif
	if (end_pfn > (end >> PAGE_SHIFT))
		end_pfn = end >> PAGE_SHIFT;
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
		pos = end_pfn << PAGE_SHIFT;
	}
	/* big page (2M) range */
	start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
			 << (PMD_SHIFT - PAGE_SHIFT);
#ifdef CONFIG_X86_32
	end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
#else /* CONFIG_X86_64 */
	end_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
			 << (PUD_SHIFT - PAGE_SHIFT);
	if (end_pfn > ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT)))
		end_pfn = ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT));
#endif

	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				page_size_mask & (1<<PG_LEVEL_2M));
		pos = end_pfn << PAGE_SHIFT;
	}
#ifdef CONFIG_X86_64
	/* big page (1G) range */
	start_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
			 << (PUD_SHIFT - PAGE_SHIFT);
	end_pfn = (end >> PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				page_size_mask &
				 ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G)));
		pos = end_pfn << PAGE_SHIFT;
	}
	/* tail is not big page (1G) alignment */
	start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
			 << (PMD_SHIFT - PAGE_SHIFT);
	end_pfn = (end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				page_size_mask & (1<<PG_LEVEL_2M));
		pos = end_pfn << PAGE_SHIFT;
	}
#endif

	/* tail is not big page (2M) alignment */
	start_pfn = pos>>PAGE_SHIFT;
	end_pfn = end>>PAGE_SHIFT;
	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
	/* try to merge same page size and continuous */
	for (i = 0; nr_range > 1 && i < nr_range - 1; i++) {
		unsigned long old_start;
		if (mr[i].end != mr[i+1].start ||
		    mr[i].page_size_mask != mr[i+1].page_size_mask)
			continue;
		/* move it */
		old_start = mr[i].start;
		memmove(&mr[i], &mr[i+1],
			(nr_range - 1 - i) * sizeof(struct map_range));
		mr[i--].start = old_start;
		nr_range--;
	}

	for (i = 0; i < nr_range; i++)
		printk(KERN_DEBUG " %010lx - %010lx page %s\n",
				mr[i].start, mr[i].end,
			(mr[i].page_size_mask & (1<<PG_LEVEL_1G))?"1G":(
			 (mr[i].page_size_mask & (1<<PG_LEVEL_2M))?"2M":"4k"));
	/*
	 * Find space for the kernel direct mapping tables.
	 *
	 * Later we should allocate these tables in the local node of the
	 * memory mapped. Unfortunately this is done currently before the
	 * nodes are discovered.
	 */
	find_early_table_space(start, end, use_pse, use_gbpages);

	for (i = 0; i < nr_range; i++)
		ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
						   mr[i].page_size_mask);
#ifdef CONFIG_X86_32
	early_ioremap_page_table_range_init();

	load_cr3(swapper_pg_dir);
#endif

	__flush_tlb_all();

	if (pgt_buf_end != pgt_buf_top)
		printk(KERN_DEBUG "initial kernel pagetable allocation wasted %lx"
			" pages\n", pgt_buf_top - pgt_buf_end);

	early_memtest(start, end);

	return ret >> PAGE_SHIFT;
}
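/*
 * Usage note (added for clarity, not in the original source): the value
 * returned above is the last pfn that got mapped.  Boot code typically
 * uses it along the lines of
 *	max_pfn_mapped = init_memory_mapping(0, max_pfn << PAGE_SHIFT);
 * so that later early allocations know how far the direct mapping reaches.
 */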
/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * On x86, access has to be given to the first megabyte of RAM because that
 * area contains BIOS code and data regions used by X and dosemu and similar
 * apps.  Access has to be given to non-kernel-RAM areas as well; these
 * contain the PCI mmio resources as well as potential BIOS/ACPI data regions.
 */
int devmem_is_allowed(unsigned long pagenr)
{
	if (pagenr <= 256)
		return 1;
	if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
		return 0;
	if (!page_is_ram(pagenr))
		return 1;
	return 0;
}
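/*
 * Example (added for clarity, not in the original source): with 4KB pages,
 * pagenr 256 corresponds to physical address 1MB, so pagenr 0xA0 (the VGA
 * window at 0xA0000) is always allowed, a page of ordinary kernel RAM above
 * 1MB is refused, and a PCI MMIO page (not RAM) is allowed unless
 * iomem_is_exclusive() claims it.
 */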
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long addr;
	unsigned long begin_aligned, end_aligned;

	/* Make sure boundaries are page aligned */
	begin_aligned = PAGE_ALIGN(begin);
	end_aligned   = end & PAGE_MASK;

	if (WARN_ON(begin_aligned != begin || end_aligned != end)) {
		begin = begin_aligned;
		end   = end_aligned;
	}

	if (begin >= end)
		return;

	addr = begin;

	/*
	 * If debugging page accesses then do not free this memory but
	 * mark them not present - any buggy init-section access will
	 * create a kernel page fault:
	 */
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
		begin, end);
	set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
#else
	/*
	 * We just marked the kernel text read only above, now that
	 * we are going to free part of that, we need to make that
	 * writeable and non-executable first.
	 */
	set_memory_nx(begin, (end - begin) >> PAGE_SHIFT);
	set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);

	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);

	for (; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
#endif
}
void free_initmem(void)
{
	free_init_pages("unused kernel memory",
			(unsigned long)(&__init_begin),
			(unsigned long)(&__init_end));
}
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	/*
	 * end may not be page aligned, and we cannot align it here: the
	 * decompressor could be confused by an aligned initrd_end.  The
	 * trailing partial page has already been reserved in
	 *   - i386_start_kernel()
	 *   - x86_64_start_kernel()
	 *   - relocate_initrd()
	 * so it is safe to PAGE_ALIGN() here and free that partial page.
	 */
	free_init_pages("initrd memory", start, PAGE_ALIGN(end));
}
#endif