/*
 *  linux/arch/i386/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/efi.h>
#include <linux/memory_hotplug.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
unsigned int __VMALLOC_RESERVE = 128 << 20;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
unsigned long highstart_pfn, highend_pfn;
static int noinline do_test_wp_bit(void);
/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. This only returns the gd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
        pud_t *pud;
        pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
        pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
        set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
        pud = pud_offset(pgd, 0);
        if (pmd_table != pmd_offset(pud, 0))
                BUG();
#else
        pud = pud_offset(pgd, 0);
        pmd_table = pmd_offset(pud, 0);
#endif

        return pmd_table;
}
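/*
 * Background note: with PAE the PGD has only 4 entries, each pointing
 * at a separate 512-entry PMD page that must really be allocated;
 * without PAE the PMD level is folded away, so pud_offset()/pmd_offset()
 * just hand back the 1024-entry PGD slot itself and nothing is allocated.
 */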
/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry.
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
        if (pmd_none(*pmd)) {
                pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
                set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
                if (page_table != pte_offset_kernel(pmd, 0))
                        BUG();

                return page_table;
        }

        return pte_offset_kernel(pmd, 0);
}
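/*
 * _PAGE_TABLE is the usual PRESENT|RW|USER|ACCESSED|DIRTY combination
 * for a pmd entry; the BUG() above is a cheap sanity check that the
 * fixed kernel mapping resolves back to the page just allocated.
 */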
/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, everywhere page tables are missing in
 * the given range.
 */

/*
 * NOTE: The pagetables are allocated contiguous on the physical space
 * so we can cache the place of the first one and move around without
 * checking the pgd every time.
 */
static void __init page_table_range_init (unsigned long start, unsigned long end, pgd_t *pgd_base)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        int pgd_idx, pmd_idx;
        unsigned long vaddr;

        vaddr = start;
        pgd_idx = pgd_index(vaddr);
        pmd_idx = pmd_index(vaddr);
        pgd = pgd_base + pgd_idx;

        for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
                if (pgd_none(*pgd))
                        one_md_table_init(pgd);
                pud = pud_offset(pgd, vaddr);
                pmd = pmd_offset(pud, vaddr);
                for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); pmd++, pmd_idx++) {
                        if (pmd_none(*pmd))
                                one_page_table_init(pmd);

                        vaddr += PMD_SIZE;
                }
                pmd_idx = 0;
        }
}
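/*
 * The "vaddr != end" test (rather than "<") is deliberate: the fixmap
 * caller below passes end == 0, and vaddr advances in PMD_SIZE steps
 * until it wraps past the top of the 32-bit address space to exactly 0,
 * covering everything from start up to the 4GB boundary.
 */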
static inline int is_kernel_text(unsigned long addr)
{
        if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
                return 1;
        return 0;
}
/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET.
 */
static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
{
        unsigned long pfn;
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;
        int pgd_idx, pmd_idx, pte_ofs;

        pgd_idx = pgd_index(PAGE_OFFSET);
        pgd = pgd_base + pgd_idx;
        pfn = 0;

        for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
                pmd = one_md_table_init(pgd);
                if (pfn >= max_low_pfn)
                        continue;
                for (pmd_idx = 0; pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn; pmd++, pmd_idx++) {
                        unsigned int address = pfn * PAGE_SIZE + PAGE_OFFSET;

                        /* Map with big pages if possible, otherwise create normal page tables. */
                        if (cpu_has_pse) {
                                unsigned int address2 = (pfn + PTRS_PER_PTE - 1) * PAGE_SIZE + PAGE_OFFSET + PAGE_SIZE-1;

                                if (is_kernel_text(address) || is_kernel_text(address2))
                                        set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
                                else
                                        set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE));
                                pfn += PTRS_PER_PTE;
                        } else {
                                pte = one_page_table_init(pmd);

                                /* advance address with pfn so the text check tracks the page actually being mapped */
                                for (pte_ofs = 0; pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn;
                                     pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
                                        if (is_kernel_text(address))
                                                set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
                                        else
                                                set_pte(pte, pfn_pte(pfn, PAGE_KERNEL));
                                }
                        }
                }
        }
}
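/*
 * Sizing example: without PAE a PSE pmd entry maps a 4MB page
 * (PTRS_PER_PTE == 1024 ptes x 4KB), so 896MB of lowmem needs only
 * 224 pmd entries and no pte pages at all; with PAE the large page
 * is 2MB (PTRS_PER_PTE == 512) but the arithmetic above still holds.
 */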
static inline int page_kills_ppro(unsigned long pagenr)
{
        if (pagenr >= 0x70000 && pagenr <= 0x7003F)
                return 1;
        return 0;
}
extern int is_available_memory(efi_memory_desc_t *);
int page_is_ram(unsigned long pagenr)
{
        int i;
        unsigned long addr, end;

        if (efi_enabled) {
                efi_memory_desc_t *md;
                void *p;

                for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
                        md = p;
                        if (!is_available_memory(md))
                                continue;
                        addr = (md->phys_addr+PAGE_SIZE-1) >> PAGE_SHIFT;
                        end = (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) >> PAGE_SHIFT;

                        if ((pagenr >= addr) && (pagenr < end))
                                return 1;
                }
                return 0;
        }

        for (i = 0; i < e820.nr_map; i++) {

                if (e820.map[i].type != E820_RAM)       /* not usable memory */
                        continue;
                /*
                 * !!!FIXME!!! Some BIOSen report areas as RAM that
                 * are not. Notably the 640->1Mb area. We need a sanity
                 * check here.
                 */
                addr = (e820.map[i].addr+PAGE_SIZE-1) >> PAGE_SHIFT;
                end = (e820.map[i].addr+e820.map[i].size) >> PAGE_SHIFT;
                if ((pagenr >= addr) && (pagenr < end))
                        return 1;
        }
        return 0;
}
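/*
 * Note the rounding: each range's start is rounded up to a page
 * boundary and its end rounded down, so a page only counts as RAM
 * when it lies entirely inside a usable region.
 */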
#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;

#define kmap_get_fixmap_pte(vaddr)                                      \
        pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), vaddr), (vaddr)), (vaddr))

static void __init kmap_init(void)
{
        unsigned long kmap_vstart;

        /* cache the first kmap pte */
        kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
        kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

        kmap_prot = PAGE_KERNEL;
}
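/*
 * kmap_pte caches the pte for the first FIX_KMAP slot; kmap_atomic()
 * then indexes from it (one slot per CPU per KM type) instead of
 * walking the page tables on every atomic mapping.
 */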
static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        unsigned long vaddr;

        vaddr = PKMAP_BASE;
        page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

        pgd = swapper_pg_dir + pgd_index(vaddr);
        pud = pud_offset(pgd, vaddr);
        pmd = pmd_offset(pud, vaddr);
        pte = pte_offset_kernel(pmd, vaddr);
        pkmap_page_table = pte;
}
void __devinit free_new_highpage(struct page *page)
{
        set_page_count(page, 1);
        __free_page(page);
        totalhigh_pages++;
}
void __init add_one_highpage_init(struct page *page, int pfn, int bad_ppro)
{
        if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
                ClearPageReserved(page);
                free_new_highpage(page);
        } else
                SetPageReserved(page);
}
static int add_one_highpage_hotplug(struct page *page, unsigned long pfn)
{
        free_new_highpage(page);
        totalram_pages++;
#ifdef CONFIG_FLATMEM
        max_mapnr = max(pfn, max_mapnr);
#endif
        num_physpages++;
        return 0;
}
/*
 * Not currently handling the NUMA case.
 * Assuming single node and all memory that
 * has been added dynamically that would be
 * onlined here is in HIGHMEM.
 */
void online_page(struct page *page)
{
        ClearPageReserved(page);
        add_one_highpage_hotplug(page, page_to_pfn(page));
}
#ifdef CONFIG_NUMA
extern void set_highmem_pages_init(int);
#else
static void __init set_highmem_pages_init(int bad_ppro)
{
        int pfn;
        for (pfn = highstart_pfn; pfn < highend_pfn; pfn++)
                add_one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
        totalram_pages += totalhigh_pages;
}
#endif /* CONFIG_NUMA */

#else
#define kmap_init() do { } while (0)
#define permanent_kmaps_init(pgd_base) do { } while (0)
#define set_highmem_pages_init(bad_ppro) do { } while (0)
#endif /* CONFIG_HIGHMEM */
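/*
 * Without CONFIG_HIGHMEM the three helpers above compile away to empty
 * statements, so their callers in pagetable_init()/mem_init() need no
 * #ifdefs of their own.
 */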
unsigned long long __PAGE_KERNEL = _PAGE_KERNEL;
EXPORT_SYMBOL(__PAGE_KERNEL);
unsigned long long __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;
#ifdef CONFIG_NUMA
extern void __init remap_numa_kva(void);
#else
#define remap_numa_kva() do {} while (0)
#endif
static void __init pagetable_init (void)
{
        unsigned long vaddr;
        pgd_t *pgd_base = swapper_pg_dir;

#ifdef CONFIG_X86_PAE
        int i;
        /* Init entries of the first-level page table to the zero page */
        for (i = 0; i < PTRS_PER_PGD; i++)
                set_pgd(pgd_base + i, __pgd(__pa(empty_zero_page) | _PAGE_PRESENT));
#endif

        /* Enable PSE if available */
        if (cpu_has_pse)
                set_in_cr4(X86_CR4_PSE);

        /* Enable PGE if available */
        if (cpu_has_pge) {
                set_in_cr4(X86_CR4_PGE);
                __PAGE_KERNEL |= _PAGE_GLOBAL;
                __PAGE_KERNEL_EXEC |= _PAGE_GLOBAL;
        }

        kernel_physical_mapping_init(pgd_base);
        remap_numa_kva();

        /*
         * Fixed mappings, only the page table structure has to be
         * created - mappings will be set by set_fixmap():
         */
        vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
        page_table_range_init(vaddr, 0, pgd_base);

        permanent_kmaps_init(pgd_base);

#ifdef CONFIG_X86_PAE
        /*
         * Add low memory identity-mappings - SMP needs it when
         * starting up on an AP from real-mode. In the non-PAE
         * case we already have these mappings through head.S.
         * All user-space mappings are explicitly cleared after
         * SMP startup.
         */
        set_pgd(&pgd_base[0], pgd_base[USER_PTRS_PER_PGD]);
#endif
}
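/*
 * _PAGE_GLOBAL marks kernel mappings as surviving CR3 reloads, so a
 * context switch no longer flushes the kernel's TLB entries; that is
 * why it is only ORed in once CR4.PGE has actually been enabled.
 */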
#ifdef CONFIG_SOFTWARE_SUSPEND
/*
 * Swap suspend & friends need this for resume because things like the intel-agp
 * driver might have split up a kernel 4MB mapping.
 */
char __nosavedata swsusp_pg_dir[PAGE_SIZE]
        __attribute__ ((aligned (PAGE_SIZE)));

static inline void save_pg_dir(void)
{
        memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
}
#else
static inline void save_pg_dir(void)
{
}
#endif
void zap_low_mappings (void)
{
        int i;

        save_pg_dir();

        /*
         * Zap initial low-memory mappings.
         *
         * Note that "pgd_clear()" doesn't do it for
         * us, because pgd_clear() is a no-op on i386.
         */
        for (i = 0; i < USER_PTRS_PER_PGD; i++)
#ifdef CONFIG_X86_PAE
                set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
#else
                set_pgd(swapper_pg_dir+i, __pgd(0));
#endif
        flush_tlb_all();
}
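/*
 * In the PAE case the entry cannot simply be cleared: the top-level
 * PDPTE must stay present (the "1" is _PAGE_PRESENT), so it is pointed
 * at the all-zero empty_zero_page instead of at a real pmd.
 */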
static int disable_nx __initdata = 0;
u64 __supported_pte_mask __read_mostly = ~_PAGE_NX;
/*
 * noexec = on|off
 *
 * Control non executable mappings.
 *
 * on      Enable
 * off     Disable
 */
void __init noexec_setup(const char *str)
{
        if (!strncmp(str, "on",2) && cpu_has_nx) {
                __supported_pte_mask |= _PAGE_NX;
                disable_nx = 0;
        } else if (!strncmp(str,"off",3)) {
                disable_nx = 1;
                __supported_pte_mask &= ~_PAGE_NX;
        }
}
int nx_enabled = 0;
#ifdef CONFIG_X86_PAE

static void __init set_nx(void)
{
        unsigned int v[4], l, h;

        if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
                cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
                if ((v[3] & (1 << 20)) && !disable_nx) {
                        rdmsr(MSR_EFER, l, h);
                        l |= EFER_NX;
                        wrmsr(MSR_EFER, l, h);
                        nx_enabled = 1;
                        __supported_pte_mask |= _PAGE_NX;
                }
        }
}
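/*
 * CPUID leaf 0x80000001 EDX bit 20 advertises NX; EFER.NX then has to
 * be turned on via the EFER MSR before NX pte bits take effect.
 */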
/*
 * Enables/disables executability of a given kernel page and
 * returns the previous setting.
 */
int __init set_kernel_exec(unsigned long vaddr, int enable)
{
        pte_t *pte;
        int ret = 1;

        if (!nx_enabled)
                goto out;

        pte = lookup_address(vaddr);
        BUG_ON(!pte);

        if (!pte_exec_kernel(*pte))
                ret = 0;

        if (enable)
                pte->pte_high &= ~(1 << (_PAGE_BIT_NX - 32));
        else
                pte->pte_high |= 1 << (_PAGE_BIT_NX - 32);
        __flush_tlb_all();
out:
        return ret;
}

#endif /* CONFIG_X86_PAE */
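/*
 * NX is bit 63 of a 64-bit PAE pte and so lives in the high 32-bit
 * word - hence the pte_high manipulation with _PAGE_BIT_NX - 32.
 */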
/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
#ifdef CONFIG_X86_PAE
        set_nx();
        if (nx_enabled)
                printk("NX (Execute Disable) protection: active\n");
#endif

        pagetable_init();

        load_cr3(swapper_pg_dir);

#ifdef CONFIG_X86_PAE
        /*
         * We will bail out later - printk doesn't work right now so
         * the user would just see a hanging kernel.
         */
        if (cpu_has_pae)
                set_in_cr4(X86_CR4_PAE);
#endif
        __flush_tlb_all();

        kmap_init();
}
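/*
 * Loading CR3 switches to swapper_pg_dir and implicitly flushes the
 * non-global TLB entries; the explicit __flush_tlb_all() afterwards
 * also clears global entries in case PGE was already enabled.
 */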
/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386's
 * and also on some strange 486's (NexGen etc.). All 586+'s are OK. This
 * used to involve black magic jumps to work around some nasty CPU bugs,
 * but fortunately the switch to using exceptions got rid of all that.
 */
static void __init test_wp_bit(void)
{
        printk("Checking if this processor honours the WP bit even in supervisor mode... ");

        /* Any page-aligned address will do, the test is non-destructive */
        __set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
        boot_cpu_data.wp_works_ok = do_test_wp_bit();
        clear_fixmap(FIX_WP_TEST);

        if (!boot_cpu_data.wp_works_ok) {
                printk("No.\n");
#ifdef CONFIG_X86_WP_WORKS_OK
                panic("This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
#endif
        } else {
                printk("Ok.\n");
        }
}
static void __init set_max_mapnr_init(void)
{
#ifdef CONFIG_HIGHMEM
        num_physpages = highend_pfn;
#else
        num_physpages = max_low_pfn;
#endif
#ifdef CONFIG_FLATMEM
        max_mapnr = num_physpages;
#endif
}
static struct kcore_list kcore_mem, kcore_vmalloc;
void __init mem_init(void)
{
        extern int ppro_with_ram_bug(void);
        int codesize, reservedpages, datasize, initsize;
        int tmp;
        int bad_ppro;

#ifdef CONFIG_FLATMEM
        if (!mem_map)
                BUG();
#endif

        bad_ppro = ppro_with_ram_bug();

#ifdef CONFIG_HIGHMEM
        /* check that fixmap and pkmap do not overlap */
        if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
                printk(KERN_ERR "fixmap and kmap areas overlap - this will crash\n");
                printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
                                PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, FIXADDR_START);
                BUG();
        }
#endif

        set_max_mapnr_init();

#ifdef CONFIG_HIGHMEM
        high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
#else
        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
#endif

        /* this will put all low memory onto the freelists */
        totalram_pages += free_all_bootmem();

        reservedpages = 0;
        for (tmp = 0; tmp < max_low_pfn; tmp++)
                /*
                 * Only count reserved RAM pages
                 */
                if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
                        reservedpages++;

        set_highmem_pages_init(bad_ppro);

        codesize = (unsigned long) &_etext - (unsigned long) &_text;
        datasize = (unsigned long) &_edata - (unsigned long) &_etext;
        initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

        kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
        kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
                   VMALLOC_END-VMALLOC_START);

        printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
                (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
                num_physpages << (PAGE_SHIFT-10),
                codesize >> 10,
                reservedpages << (PAGE_SHIFT-10),
                datasize >> 10,
                initsize >> 10,
                (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
               );

#ifdef CONFIG_X86_PAE
        if (!cpu_has_pae)
                panic("cannot execute a PAE-enabled kernel on a PAE-less CPU!");
#endif
        if (boot_cpu_data.wp_works_ok < 0)
                test_wp_bit();

        /*
         * Subtle. SMP is doing its boot stuff late (because it has to
         * fork idle threads) - but it also needs low mappings for the
         * protected-mode entry to work. We zap these entries only after
         * the WP-bit has been tested.
         */
#ifndef CONFIG_SMP
        zap_low_mappings();
#endif
}
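/*
 * Unit note: the shifts by (PAGE_SHIFT-10) above convert page counts
 * to kilobytes (a 4KB page is 4k), while codesize/datasize/initsize
 * are byte counts and are simply shifted down by 10.
 */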
/*
 * this is for the non-NUMA, single node SMP system case.
 * Specifically, in the case of x86, we will always add
 * memory to the highmem for now.
 */
#ifndef CONFIG_NEED_MULTIPLE_NODES
int add_memory(u64 start, u64 size)
{
        struct pglist_data *pgdata = &contig_page_data;
        struct zone *zone = pgdata->node_zones + MAX_NR_ZONES-1;
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;

        return __add_pages(zone, start_pfn, nr_pages);
}

int remove_memory(u64 start, u64 size)
{
        return -EINVAL;
}
#endif
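/*
 * "node_zones + MAX_NR_ZONES-1" picks the last zone, i.e. ZONE_HIGHMEM
 * on i386, matching the comment above: hot-added memory only ever goes
 * into highmem here, and removal is simply unsupported (-EINVAL).
 */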
kmem_cache_t *pgd_cache;
kmem_cache_t *pmd_cache;

void __init pgtable_cache_init(void)
{
        if (PTRS_PER_PMD > 1) {
                pmd_cache = kmem_cache_create("pmd",
                                        PTRS_PER_PMD*sizeof(pmd_t),
                                        PTRS_PER_PMD*sizeof(pmd_t),
                                        0,
                                        pmd_ctor,
                                        NULL);
                if (!pmd_cache)
                        panic("pgtable_cache_init(): cannot create pmd cache");
        }
        pgd_cache = kmem_cache_create("pgd",
                                PTRS_PER_PGD*sizeof(pgd_t),
                                PTRS_PER_PGD*sizeof(pgd_t),
                                0,
                                pgd_ctor,
                                PTRS_PER_PMD == 1 ? pgd_dtor : NULL);
        if (!pgd_cache)
                panic("pgtable_cache_init(): Cannot create pgd cache");
}
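/*
 * PTRS_PER_PMD > 1 only holds with PAE, so the pmd cache exists only
 * there; passing the object size as the alignment argument keeps each
 * pgd/pmd page naturally aligned, which the hardware walker requires.
 */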
/*
 * This function cannot be __init, since exceptions don't work in that
 * section. Put this after the callers, so that it cannot be inlined.
 */
static int noinline do_test_wp_bit(void)
{
        char tmp_reg;
        int flag;

        __asm__ __volatile__(
                "       movb %0,%1      \n"
                "1:     movb %1,%0      \n"
                "       xorl %2,%2      \n"
                "2:                     \n"
                ".section __ex_table,\"a\"\n"
                "       .align 4        \n"
                "       .long 1b,2b     \n"
                ".previous              \n"
                :"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
                 "=q" (tmp_reg),
                 "=r" (flag)
                :"2" (1)
                :"memory");

        return flag;
}
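/*
 * flag starts out as 1; the write-back through the read-only fixmap
 * either faults (WP honoured - the __ex_table entry resumes execution
 * at label 2 with flag still 1) or succeeds, in which case the xorl
 * clears flag to 0.
 */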
void free_initmem(void)
{
        unsigned long addr;

        addr = (unsigned long)(&__init_begin);
        for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(addr));
                set_page_count(virt_to_page(addr), 1);
                memset((void *)addr, 0xcc, PAGE_SIZE);
                free_page(addr);
                totalram_pages++;
        }
        printk (KERN_INFO "Freeing unused kernel memory: %dk freed\n", (__init_end - __init_begin) >> 10);
}
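/*
 * 0xcc is the x86 int3 opcode: poisoning freed init text this way turns
 * any stale call into it into an immediate breakpoint trap rather than
 * silent corruption.
 */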
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        if (start < end)
                printk (KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
        for (; start < end; start += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(start));
                set_page_count(virt_to_page(start), 1);
                free_page(start);
                totalram_pages++;
        }
}
#endif