#include <linux/ioport.h>
#include <linux/swap.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/init.h>
#include <asm/page.h>
#include <asm/page_types.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/tlbflush.h>

unsigned long __initdata e820_table_start;
unsigned long __meminitdata e820_table_end;
unsigned long __meminitdata e820_table_top;

enum bootmem_state bootmem_state = BEFORE_BOOTMEM;
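/*
 * The enum itself is not defined in this file; it is assumed to come from
 * a shared header (likely <asm/init.h>).  A minimal sketch of the presumed
 * definition, sufficient for the BEFORE_BOOTMEM checks below:
 *
 *	enum bootmem_state {
 *		BEFORE_BOOTMEM,
 *		DURING_BOOTMEM,
 *		AFTER_BOOTMEM,
 *	};
 */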
int direct_gbpages
#ifdef CONFIG_DIRECT_GBPAGES
				= 1
#endif
;

static void __init find_early_table_space(unsigned long end, int use_pse,
					  int use_gbpages)
{
	unsigned long puds, pmds, ptes, tables, start;

	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
	tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);

	if (use_gbpages) {
		unsigned long extra;

		extra = end - ((end >> PUD_SHIFT) << PUD_SHIFT);
		pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT;
	} else
		pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;

	tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);

	if (use_pse) {
		unsigned long extra;

		extra = end - ((end >> PMD_SHIFT) << PMD_SHIFT);
#ifdef CONFIG_X86_32
		extra += PMD_SIZE;
#endif
		ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
	} else
		ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;

	tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);

#ifdef CONFIG_X86_32
	/* for fixmap */
	tables += roundup(__end_of_fixed_addresses * sizeof(pte_t), PAGE_SIZE);
#endif

	/*
	 * RED-PEN putting page tables only on node 0 could
	 * cause a hotspot and fill up ZONE_DMA. The page tables
	 * need roughly 0.5KB per GB.
	 */
#ifdef CONFIG_X86_32
	start = 0x7000;
	e820_table_start = find_e820_area(start, max_pfn_mapped << PAGE_SHIFT,
					  tables, PAGE_SIZE);
#else /* CONFIG_X86_64 */
	start = 0x8000;
	e820_table_start = find_e820_area(start, end, tables, PAGE_SIZE);
#endif
	if (e820_table_start == -1UL)
		panic("Cannot find space for the kernel page tables");

	e820_table_start >>= PAGE_SHIFT;
	e820_table_end = e820_table_start;
	e820_table_top = e820_table_start + (tables >> PAGE_SHIFT);

	printk(KERN_DEBUG "kernel direct mapping tables up to %lx @ %lx-%lx\n",
		end, e820_table_start << PAGE_SHIFT,
		e820_table_top << PAGE_SHIFT);
}
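/*
 * Worked example for the sizing above (assuming 64-bit, use_pse=1,
 * use_gbpages=0, end = 1 GiB): puds = 1, so the pud_t allocation rounds up
 * to one 4 KiB page; pmds = 512, so the pmd_t allocation is exactly one
 * more 4 KiB page; end is 2 MiB aligned, so extra = 0 and no pte pages are
 * needed.  Total: about 8 KiB of early page-table space carved out of e820.
 */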
struct map_range {
	unsigned long start;
	unsigned long end;
	unsigned page_size_mask;
};

#ifdef CONFIG_X86_32
#define NR_RANGE_MR 3
#else /* CONFIG_X86_64 */
#define NR_RANGE_MR 5
#endif
static int __meminit save_mr(struct map_range *mr, int nr_range,
			     unsigned long start_pfn, unsigned long end_pfn,
			     unsigned long page_size_mask)
{
	if (start_pfn < end_pfn) {
		if (nr_range >= NR_RANGE_MR)
			panic("run out of range for init_memory_mapping\n");
		mr[nr_range].start = start_pfn << PAGE_SHIFT;
		mr[nr_range].end   = end_pfn << PAGE_SHIFT;
		mr[nr_range].page_size_mask = page_size_mask;
		nr_range++;
	}

	return nr_range;
}
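/*
 * save_mr() appends one [start_pfn, end_pfn) range, tagged with the page
 * sizes it may use, and returns the new range count, so callers chain it:
 *
 *	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
 *
 * An empty range (start_pfn >= end_pfn) is silently dropped, which lets
 * init_memory_mapping() below emit its head/body/tail ranges unconditionally.
 */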
#ifdef CONFIG_X86_64
static void __init init_gbpages(void)
{
	if (direct_gbpages && cpu_has_gbpages)
		printk(KERN_INFO "Using GB pages for direct mapping\n");
	else
		direct_gbpages = 0;
}
#else
static inline void init_gbpages(void)
{
}
#endif
/*
 * Setup the direct mapping of the physical memory at PAGE_OFFSET.
 * This runs before bootmem is initialized and gets pages directly from
 * the physical memory. To access them they are temporarily mapped.
 */
unsigned long __init_refok init_memory_mapping(unsigned long start,
					       unsigned long end)
{
	unsigned long page_size_mask = 0;
	unsigned long start_pfn, end_pfn;
	unsigned long ret = 0;
	unsigned long pos;

	struct map_range mr[NR_RANGE_MR];
	int nr_range, i;
	int use_pse, use_gbpages;

	printk(KERN_INFO "init_memory_mapping: %016lx-%016lx\n", start, end);
	if (bootmem_state == BEFORE_BOOTMEM)
		init_gbpages();

#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK)
	/*
	 * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
	 * This will simplify cpa(), which otherwise needs to support splitting
	 * large pages into small in interrupt context, etc.
	 */
	use_pse = use_gbpages = 0;
#else
	use_pse = cpu_has_pse;
	use_gbpages = direct_gbpages;
#endif

#ifdef CONFIG_X86_PAE
	set_nx();
	if (nx_enabled)
		printk(KERN_INFO "NX (Execute Disable) protection: active\n");
#endif

	/* Enable PSE if available */
	if (cpu_has_pse)
		set_in_cr4(X86_CR4_PSE);

	/* Enable PGE if available */
	if (cpu_has_pge) {
		set_in_cr4(X86_CR4_PGE);
		__supported_pte_mask |= _PAGE_GLOBAL;
	}

	if (use_gbpages)
		page_size_mask |= 1 << PG_LEVEL_1G;
	if (use_pse)
		page_size_mask |= 1 << PG_LEVEL_2M;

	memset(mr, 0, sizeof(mr));
	nr_range = 0;
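	/*
	 * From here on the [start, end) span is split into at most
	 * NR_RANGE_MR ranges: a possibly unaligned head mapped with 4k
	 * pages, large-page-aligned middle ranges, and an unaligned tail,
	 * again in 4k pages.  Each range's page_size_mask records which
	 * large page sizes (PG_LEVEL_2M/PG_LEVEL_1G) it is allowed to use.
	 */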
	/* head if not big page alignment ? */
	start_pfn = start >> PAGE_SHIFT;
	pos = start_pfn << PAGE_SHIFT;
#ifdef CONFIG_X86_32
	/*
	 * Don't use a large page for the first 2/4MB of memory
	 * because there are often fixed size MTRRs in there
	 * and overlapping MTRRs into large pages can cause
	 * slowdowns.
	 */
	if (pos == 0)
		end_pfn = 1 << (PMD_SHIFT - PAGE_SHIFT);
	else
		end_pfn = ((pos + (PMD_SIZE - 1)) >> PMD_SHIFT)
				 << (PMD_SHIFT - PAGE_SHIFT);
#else /* CONFIG_X86_64 */
	end_pfn = ((pos + (PMD_SIZE - 1)) >> PMD_SHIFT)
			<< (PMD_SHIFT - PAGE_SHIFT);
#endif
	if (end_pfn > (end >> PAGE_SHIFT))
		end_pfn = end >> PAGE_SHIFT;
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
		pos = end_pfn << PAGE_SHIFT;
	}
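	/*
	 * Example: for start = 1 MiB on 64-bit, start_pfn = 0x100 and the
	 * head range ends at the next 2 MiB boundary (end_pfn = 0x200), so
	 * the stretch 1 MiB..2 MiB is mapped with 4k pages and pos advances
	 * to 2 MiB before the large-page ranges are computed.
	 */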
	/* big page (2M) range */
	start_pfn = ((pos + (PMD_SIZE - 1)) >> PMD_SHIFT)
			 << (PMD_SHIFT - PAGE_SHIFT);
#ifdef CONFIG_X86_32
	end_pfn = (end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
#else /* CONFIG_X86_64 */
	end_pfn = ((pos + (PUD_SIZE - 1)) >> PUD_SHIFT)
			 << (PUD_SHIFT - PAGE_SHIFT);
	if (end_pfn > ((end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT)))
		end_pfn = ((end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT));
#endif

	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				page_size_mask & (1 << PG_LEVEL_2M));
		pos = end_pfn << PAGE_SHIFT;
	}
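	/*
	 * On 64-bit the 2M range deliberately stops at the first 1 GiB
	 * boundary above pos (capped at end's 2M alignment), so the fully
	 * 1 GiB aligned middle can be handed to the 1G range below.
	 */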
#ifdef CONFIG_X86_64
	/* big page (1G) range */
	start_pfn = ((pos + (PUD_SIZE - 1)) >> PUD_SHIFT)
			 << (PUD_SHIFT - PAGE_SHIFT);
	end_pfn = (end >> PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				page_size_mask &
				 ((1 << PG_LEVEL_2M) | (1 << PG_LEVEL_1G)));
		pos = end_pfn << PAGE_SHIFT;
	}

	/* tail is not big page (1G) alignment */
	start_pfn = ((pos + (PMD_SIZE - 1)) >> PMD_SHIFT)
			 << (PMD_SHIFT - PAGE_SHIFT);
	end_pfn = (end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				page_size_mask & (1 << PG_LEVEL_2M));
		pos = end_pfn << PAGE_SHIFT;
	}
#endif
	/* tail is not big page (2M) alignment */
	start_pfn = pos >> PAGE_SHIFT;
	end_pfn = end >> PAGE_SHIFT;
	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
	/* try to merge contiguous ranges with the same page size */
	for (i = 0; nr_range > 1 && i < nr_range - 1; i++) {
		unsigned long old_start;
		if (mr[i].end != mr[i+1].start ||
		    mr[i].page_size_mask != mr[i+1].page_size_mask)
			continue;
		/* move it */
		old_start = mr[i].start;
		memmove(&mr[i], &mr[i+1],
			(nr_range - 1 - i) * sizeof(struct map_range));
		mr[i--].start = old_start;
		nr_range--;
	}
	for (i = 0; i < nr_range; i++)
		printk(KERN_DEBUG " %010lx - %010lx page %s\n",
				mr[i].start, mr[i].end,
			(mr[i].page_size_mask & (1 << PG_LEVEL_1G)) ? "1G" : (
			 (mr[i].page_size_mask & (1 << PG_LEVEL_2M)) ? "2M" : "4k"));
	/*
	 * Find space for the kernel direct mapping tables.
	 *
	 * Later we should allocate these tables in the local node of the
	 * memory mapped. Unfortunately this is done currently before the
	 * nodes are discovered.
	 */
	if (bootmem_state == BEFORE_BOOTMEM)
		find_early_table_space(end, use_pse, use_gbpages);

#ifdef CONFIG_X86_32
	for (i = 0; i < nr_range; i++)
		kernel_physical_mapping_init(mr[i].start, mr[i].end,
					     mr[i].page_size_mask);
	ret = end;
#else /* CONFIG_X86_64 */
	for (i = 0; i < nr_range; i++)
		ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
						   mr[i].page_size_mask);
#endif

#ifdef CONFIG_X86_32
	early_ioremap_page_table_range_init();

	load_cr3(swapper_pg_dir);
#endif
#ifdef CONFIG_X86_64
	if (bootmem_state == BEFORE_BOOTMEM)
		mmu_cr4_features = read_cr4();
#endif
	__flush_tlb_all();

	if (bootmem_state == BEFORE_BOOTMEM &&
	    e820_table_end > e820_table_start)
		reserve_early(e820_table_start << PAGE_SHIFT,
			      e820_table_end << PAGE_SHIFT, "PGTABLE");

	if (bootmem_state == BEFORE_BOOTMEM)
		early_memtest(start, end);

	return ret >> PAGE_SHIFT;
}
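/*
 * A sketch of the expected caller, based on how setup_arch() uses this
 * function in mainline (names assumed, not taken from this file):
 *
 *	max_low_pfn_mapped = init_memory_mapping(0,
 *						 max_low_pfn << PAGE_SHIFT);
 *	max_pfn_mapped = max_low_pfn_mapped;
 *
 * i.e. the return value is the highest pfn actually mapped.
 */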
/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * On x86, access has to be given to the first megabyte of ram because that area
 * contains bios code and data regions used by X and dosemu and similar apps.
 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
 * mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pagenr)
{
	if (pagenr <= 256)
		return 1;
	if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
		return 0;
	if (!page_is_ram(pagenr))
		return 1;
	return 0;
}
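/*
 * So, for example, pfn 0x10 (64 KiB, inside the legacy BIOS area) is
 * allowed; a pfn backed by ordinary kernel RAM above 1 MiB is refused;
 * and a pfn covering non-RAM space such as PCI mmio is allowed again,
 * unless the resource was claimed exclusive.
 */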
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long addr = begin;

	if (addr >= end)
		return;

	/*
	 * If debugging page accesses then do not free this memory but
	 * mark them not present - any buggy init-section access will
	 * create a kernel page fault:
	 */
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
		begin, PAGE_ALIGN(end));
	set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
#else
	/*
	 * We just marked the kernel text read only above, now that
	 * we are going to free part of that, we need to make that
	 * writeable first.
	 */
	set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);

	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);

	for (; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		memset((void *)(addr & ~(PAGE_SIZE-1)),
		       POISON_FREE_INITMEM, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
#endif
}
void free_initmem(void)
{
	free_init_pages("unused kernel memory",
			(unsigned long)(&__init_begin),
			(unsigned long)(&__init_end));
}
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory", start, end);
}
#endif