#include <linux/initrd.h>
#include <linux/ioport.h>
#include <linux/swap.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/init.h>
#include <asm/page.h>
#include <asm/page_types.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/system.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/proto.h>

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
unsigned long __initdata e820_table_start;
unsigned long __meminitdata e820_table_end;
unsigned long __meminitdata e820_table_top;
int after_bootmem;

int direct_gbpages
#ifdef CONFIG_DIRECT_GBPAGES
				= 1
#endif
;

#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
static int disable_nx __cpuinitdata;
/*
 * noexec = on|off
 *
 * Control non-executable mappings for processes.
 *
 * on      Enable
 * off     Disable
 */
static int __init noexec_setup(char *str)
{
	if (!str)
		return -EINVAL;
	if (!strncmp(str, "on", 2)) {
		__supported_pte_mask |= _PAGE_NX;
		disable_nx = 0;
	} else if (!strncmp(str, "off", 3)) {
		disable_nx = 1;
		__supported_pte_mask &= ~_PAGE_NX;
	}
	return 0;
}
early_param("noexec", noexec_setup);
#endif
#ifdef CONFIG_X86_PAE
static void __init set_nx(void)
{
	unsigned int v[4], l, h;

	if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
		cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);

		/* CPUID 0x80000001: EDX bit 20 is the NX capability bit. */
		if ((v[3] & (1 << 20)) && !disable_nx) {
			rdmsr(MSR_EFER, l, h);
			l |= EFER_NX;
			wrmsr(MSR_EFER, l, h);
			nx_enabled = 1;
			__supported_pte_mask |= _PAGE_NX;
		}
	}
}
#else
static inline void set_nx(void)
{
}
#endif
#ifdef CONFIG_X86_64
void __cpuinit check_efer(void)
{
	unsigned long efer;

	rdmsrl(MSR_EFER, efer);
	if (!(efer & EFER_NX) || disable_nx)
		__supported_pte_mask &= ~_PAGE_NX;
}
#endif
static void __init find_early_table_space(unsigned long end, int use_pse,
					  int use_gbpages)
{
	unsigned long puds, pmds, ptes, tables, start;

	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
	tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);

	if (use_gbpages) {
		unsigned long extra;

		extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT);
		pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT;
	} else
		pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;

	tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);

	if (use_pse) {
		unsigned long extra;

		extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
#ifdef CONFIG_X86_32
		extra += PMD_SIZE;
#endif
		ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
	} else
		ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;

	tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);

#ifdef CONFIG_X86_32
	/* for fixmap */
	tables += roundup(__end_of_fixed_addresses * sizeof(pte_t), PAGE_SIZE);
#endif

	/*
	 * RED-PEN putting page tables only on node 0 could
	 * cause a hotspot and fill up ZONE_DMA. The page tables
	 * need roughly 0.5KB per GB.
	 */
#ifdef CONFIG_X86_32
	start = 0x7000;
#else
	start = 0x8000;
#endif
	e820_table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
					tables, PAGE_SIZE);
	if (e820_table_start == -1UL)
		panic("Cannot find space for the kernel page tables");

	e820_table_start >>= PAGE_SHIFT;
	e820_table_end = e820_table_start;
	e820_table_top = e820_table_start + (tables >> PAGE_SHIFT);

	printk(KERN_DEBUG "kernel direct mapping tables up to %lx @ %lx-%lx\n",
		end, e820_table_start << PAGE_SHIFT, e820_table_top << PAGE_SHIFT);
}
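/*
 * Worked example (illustrative, not from the original source): for
 * end = 4GB with use_pse and use_gbpages both clear,
 *	puds = 4	->  1 page  (4KB) of PUDs after roundup,
 *	pmds = 2048	->  4 pages (16KB) of PMDs,
 *	ptes = 1048576	->  2048 pages (8MB) of PTEs,
 * i.e. about 8MB of tables to map 4GB entirely with 4K pages. With
 * use_pse set and a 2M-aligned end, ptes drops to 0 and the total
 * shrinks to roughly 20KB.
 */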
struct map_range {
	unsigned long start;
	unsigned long end;
	unsigned page_size_mask;
};

#ifdef CONFIG_X86_32
#define NR_RANGE_MR 3
#else /* CONFIG_X86_64 */
#define NR_RANGE_MR 5
#endif
static int __meminit save_mr(struct map_range *mr, int nr_range,
			     unsigned long start_pfn, unsigned long end_pfn,
			     unsigned long page_size_mask)
{
	if (start_pfn < end_pfn) {
		if (nr_range >= NR_RANGE_MR)
			panic("run out of range for init_memory_mapping\n");
		mr[nr_range].start = start_pfn<<PAGE_SHIFT;
		mr[nr_range].end   = end_pfn<<PAGE_SHIFT;
		mr[nr_range].page_size_mask = page_size_mask;
		nr_range++;
	}

	return nr_range;
}
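/*
 * Illustrative call (hypothetical values, not in the original file):
 *	nr_range = save_mr(mr, 0, 0x100, 0x200, 0);
 * records pfns [0x100, 0x200) as a 4k-mapped range in mr[0] and
 * returns 1; an empty range (start_pfn == end_pfn) is dropped and
 * nr_range comes back unchanged.
 */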
/*
 * Set up the direct mapping of the physical memory at PAGE_OFFSET.
 * This runs before bootmem is initialized and gets pages directly from
 * the physical memory. To access them they are temporarily mapped.
 */
unsigned long __init_refok init_memory_mapping(unsigned long start,
					       unsigned long end)
{
	unsigned long page_size_mask = 0;
	unsigned long start_pfn, end_pfn;
	unsigned long ret = 0;
	unsigned long pos;

	struct map_range mr[NR_RANGE_MR];
	int nr_range, i;
	int use_pse, use_gbpages;

	printk(KERN_INFO "init_memory_mapping: %016lx-%016lx\n", start, end);
#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK)
	/*
	 * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
	 * This will simplify cpa(), which otherwise needs to support splitting
	 * large pages into small in interrupt context, etc.
	 */
	use_pse = use_gbpages = 0;
#else
	use_pse = cpu_has_pse;
	use_gbpages = direct_gbpages;
#endif
	set_nx();
	if (nx_enabled)
		printk(KERN_INFO "NX (Execute Disable) protection: active\n");

	/* Enable PSE if available */
	if (cpu_has_pse)
		set_in_cr4(X86_CR4_PSE);

	/* Enable PGE if available */
	if (cpu_has_pge) {
		set_in_cr4(X86_CR4_PGE);
		__supported_pte_mask |= _PAGE_GLOBAL;
	}

	if (use_gbpages)
		page_size_mask |= 1 << PG_LEVEL_1G;
	if (use_pse)
		page_size_mask |= 1 << PG_LEVEL_2M;
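	/*
	 * Illustrative note (not in the original file): on a CPU with
	 * PSE but no 1G-page support, page_size_mask is now just
	 * (1 << PG_LEVEL_2M), so the save_mr() calls below can grant
	 * at most 2M mappings to any range.
	 */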
	memset(mr, 0, sizeof(mr));
	nr_range = 0;

	/* head if not big page aligned */
	start_pfn = start >> PAGE_SHIFT;
	pos = start_pfn << PAGE_SHIFT;
#ifdef CONFIG_X86_32
	/*
	 * Don't use a large page for the first 2/4MB of memory
	 * because there are often fixed size MTRRs in there
	 * and overlapping MTRRs into large pages can cause
	 * slowdowns.
	 */
	if (pos == 0)
		end_pfn = 1<<(PMD_SHIFT - PAGE_SHIFT);
	else
		end_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
				 << (PMD_SHIFT - PAGE_SHIFT);
#else /* CONFIG_X86_64 */
	end_pfn = ((pos + (PMD_SIZE - 1)) >> PMD_SHIFT)
			<< (PMD_SHIFT - PAGE_SHIFT);
#endif
	if (end_pfn > (end >> PAGE_SHIFT))
		end_pfn = end >> PAGE_SHIFT;
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
		pos = end_pfn << PAGE_SHIFT;
	}
	/* big page (2M) range */
	start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
			 << (PMD_SHIFT - PAGE_SHIFT);
#ifdef CONFIG_X86_32
	end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
#else /* CONFIG_X86_64 */
	end_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
			 << (PUD_SHIFT - PAGE_SHIFT);
	if (end_pfn > ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT)))
		end_pfn = ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT));
#endif

	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				page_size_mask & (1<<PG_LEVEL_2M));
		pos = end_pfn << PAGE_SHIFT;
	}
#ifdef CONFIG_X86_64
	/* big page (1G) range */
	start_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
			 << (PUD_SHIFT - PAGE_SHIFT);
	end_pfn = (end >> PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				page_size_mask &
				 ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G)));
		pos = end_pfn << PAGE_SHIFT;
	}

	/* tail if not big page (1G) aligned */
	start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
			 << (PMD_SHIFT - PAGE_SHIFT);
	end_pfn = (end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				page_size_mask & (1<<PG_LEVEL_2M));
		pos = end_pfn << PAGE_SHIFT;
	}
#endif
	/* tail if not big page (2M) aligned */
	start_pfn = pos>>PAGE_SHIFT;
	end_pfn = end>>PAGE_SHIFT;
	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
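	/*
	 * Worked example (illustrative, not in the original file): on
	 * x86_64 with 2M and 1G pages allowed, mapping
	 * [0x1000, 0x80000000) splits into
	 *	0x00001000-0x00200000  4k  (unaligned head)
	 *	0x00200000-0x40000000  2M  (up to 1G alignment)
	 *	0x40000000-0x80000000  1G
	 * and both tail ranges come out empty, so save_mr() drops them.
	 */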
	/* try to merge contiguous ranges with the same page size */
	for (i = 0; nr_range > 1 && i < nr_range - 1; i++) {
		unsigned long old_start;
		if (mr[i].end != mr[i+1].start ||
		    mr[i].page_size_mask != mr[i+1].page_size_mask)
			continue;
		/* move it */
		old_start = mr[i].start;
		memmove(&mr[i], &mr[i+1],
			(nr_range - 1 - i) * sizeof(struct map_range));
		mr[i--].start = old_start;
		nr_range--;
	}
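	/*
	 * Illustrative merge (hypothetical ranges, not in the original
	 * file): if mr[0] is 0x000000-0x200000 and mr[1] is
	 * 0x200000-0x400000, both 4k, the memmove collapses them into
	 * a single mr[0] covering 0x000000-0x400000; decrementing i
	 * rechecks the merged entry against its new successor.
	 */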
	for (i = 0; i < nr_range; i++)
		printk(KERN_DEBUG " %010lx - %010lx page %s\n",
				mr[i].start, mr[i].end,
			(mr[i].page_size_mask & (1<<PG_LEVEL_1G))?"1G":(
			 (mr[i].page_size_mask & (1<<PG_LEVEL_2M))?"2M":"4k"));
	/*
	 * Find space for the kernel direct mapping tables.
	 *
	 * Later we should allocate these tables in the local node of the
	 * memory mapped. Unfortunately this is done currently before the
	 * nodes are discovered.
	 */
	if (!after_bootmem)
		find_early_table_space(end, use_pse, use_gbpages);
#ifdef CONFIG_X86_32
	for (i = 0; i < nr_range; i++)
		kernel_physical_mapping_init(mr[i].start, mr[i].end,
					     mr[i].page_size_mask);
	ret = end;
#else /* CONFIG_X86_64 */
	for (i = 0; i < nr_range; i++)
		ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
						   mr[i].page_size_mask);
#endif
#ifdef CONFIG_X86_32
	early_ioremap_page_table_range_init();

	load_cr3(swapper_pg_dir);
#endif

#ifdef CONFIG_X86_64
	if (!after_bootmem && !start) {
		pud_t *pud;
		pmd_t *pmd;

		mmu_cr4_features = read_cr4();

		/*
		 * _brk_end cannot change anymore, but it and _end may be
		 * located on different 2M pages. cleanup_highmap(), however,
		 * can only consider _end when it runs, so destroy any
		 * mappings beyond _brk_end here.
		 */
		pud = pud_offset(pgd_offset_k(_brk_end), _brk_end);
		pmd = pmd_offset(pud, _brk_end - 1);
		while (++pmd <= pmd_offset(pud, (unsigned long)_end - 1))
			pmd_clear(pmd);
	}
#endif
	__flush_tlb_all();
	if (!after_bootmem && e820_table_end > e820_table_start)
		reserve_early(e820_table_start << PAGE_SHIFT,
				e820_table_end << PAGE_SHIFT, "PGTABLE");

	if (!after_bootmem)
		early_memtest(start, end);

	return ret >> PAGE_SHIFT;
}
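/*
 * Illustrative note (not in the original file): the value returned is
 * a pfn. On 32-bit, ret is set to 'end' before the shift; on 64-bit it
 * is whatever address kernel_physical_mapping_init() reports as last
 * mapped, which large pages may round up past 'end'.
 */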
/*
 * devmem_is_allowed() checks whether /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * On x86, access has to be given to the first megabyte of RAM because that
 * area contains BIOS code and data regions used by X and dosemu and similar
 * apps. Access has to be given to non-kernel-RAM areas as well; these
 * contain the PCI mmio resources as well as potential BIOS/ACPI data regions.
 */
int devmem_is_allowed(unsigned long pagenr)
{
	if (pagenr <= 256)	/* first 1MB: 256 pages of 4KB */
		return 1;
	if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
		return 0;
	if (!page_is_ram(pagenr))
		return 1;
	return 0;
}
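/*
 * Illustrative cases (not in the original file): pagenr 0 (real-mode
 * IVT/BDA) is allowed by the first-megabyte rule; a page inside
 * ordinary kernel RAM is refused; a PCI MMIO page fails page_is_ram()
 * and is allowed unless a driver claimed it as exclusive.
 */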
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long addr = begin;

	if (addr >= end)
		return;

	/*
	 * If debugging page accesses then do not free this memory but
	 * mark it not present - any buggy init-section access will
	 * create a kernel page fault:
	 */
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
		begin, PAGE_ALIGN(end));
	set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
#else
	/*
	 * We just marked the kernel text read-only above; now that
	 * we are going to free part of that, we need to make it
	 * writable first.
	 */
	set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);

	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);

	for (; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		memset((void *)(addr & ~(PAGE_SIZE-1)),
			POISON_FREE_INITMEM, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
#endif
}
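/*
 * Illustrative note (not in the original file): POISON_FREE_INITMEM is
 * 0xcc, so a stale pointer into freed __init memory reads poison bytes
 * instead of leftover code or data, making a buggy late access fail
 * loudly rather than silently.
 */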
void free_initmem(void)
{
	free_init_pages("unused kernel memory",
			(unsigned long)(&__init_begin),
			(unsigned long)(&__init_end));
}
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory", start, end);
}
#endif