/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */
#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/memory_hotplug.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/sections.h>
#include <asm/dma-mapping.h>
#include <asm/swiotlb.h>
struct dma_mapping_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

static unsigned long dma_reserve __initdata;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
/*
 * NOTE: pagetable_init allocates all the fixmap pagetables contiguous on the
 * physical space so we can cache the place of the first one and move
 * around without checking the pgd every time.
 */
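/* Dump a summary of memory usage: totals of RAM, reserved, shared and
   swap-cached pages across all online nodes. */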
void show_mem(void)
{
	long i, total = 0, reserved = 0;
	long shared = 0, cached = 0;
	pg_data_t *pgdat;
	struct page *page;

	printk(KERN_INFO "Mem-info:\n");
	printk(KERN_INFO "Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));

	for_each_online_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
			page = pfn_to_page(pgdat->node_start_pfn + i);
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
	}
	printk(KERN_INFO "%lu pages of RAM\n", total);
	printk(KERN_INFO "%lu reserved pages\n", reserved);
	printk(KERN_INFO "%lu pages shared\n", shared);
	printk(KERN_INFO "%lu pages swap cached\n", cached);
}
/* References to section boundaries */

int after_bootmem;
static __init void *spp_getpage(void)
{
	void *ptr;

	if (after_bootmem)
		ptr = (void *) get_zeroed_page(GFP_ATOMIC);
	else
		ptr = alloc_bootmem_pages(PAGE_SIZE);
	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK))
		panic("set_pte_phys: cannot allocate page data %s\n", after_bootmem?"after bootmem":"");

	Dprintk("spp_getpage %p\n", ptr);
	return ptr;
}
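/* Install a single kernel mapping for vaddr -> phys with the given
   protection, allocating any missing pud/pmd/pte levels via spp_getpage(). */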
static __init void set_pte_phys(unsigned long vaddr,
			unsigned long phys, pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte, new_pte;

	Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);

	pgd = pgd_offset_k(vaddr);
	if (pgd_none(*pgd)) {
		printk("PGD FIXMAP MISSING, it should be setup in head.S!\n");
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		pmd = (pmd_t *) spp_getpage();
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
		if (pmd != pmd_offset(pud, 0)) {
			printk("PAGETABLE BUG #01! %p <-> %p\n", pmd, pmd_offset(pud,0));
			return;
		}
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		pte = (pte_t *) spp_getpage();
		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
		if (pte != pte_offset_kernel(pmd, 0)) {
			printk("PAGETABLE BUG #02!\n");
			return;
		}
	}
	new_pte = pfn_pte(phys >> PAGE_SHIFT, prot);

	pte = pte_offset_kernel(pmd, vaddr);
	if (!pte_none(*pte) &&
	    pte_val(*pte) != (pte_val(new_pte) & __supported_pte_mask))
		pte_ERROR(*pte);
	set_pte(pte, new_pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}
/* NOTE: this is meant to be run only at boot */
void __init
__set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		printk("Invalid __set_fixmap\n");
		return;
	}
	set_pte_phys(address, phys, prot);
}
unsigned long __initdata table_start, table_end;

extern pmd_t temp_boot_pmds[];

static struct temp_map {
	pmd_t *pmd;
	void *address;
	int allocated;
} temp_mappings[] __initdata = {
	{ &temp_boot_pmds[0], (void *)(40UL * 1024 * 1024) },
	{ &temp_boot_pmds[1], (void *)(42UL * 1024 * 1024) },
	{}
};
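/* Hand out zeroed pages for early page tables.  Before bootmem is running
   the page comes from the range reserved by find_early_table_space() and is
   made addressable through one of the temporary 2MB mappings above; once
   after_bootmem is set a normal GFP_ATOMIC page is used instead. */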
static __meminit void *alloc_low_page(int *index, unsigned long *phys)
{
	struct temp_map *ti;
	int i;
	unsigned long pfn = table_end++, paddr;
	void *adr;

	if (after_bootmem) {
		adr = (void *)get_zeroed_page(GFP_ATOMIC);
		*phys = __pa(adr);
		return adr;
	}

	if (pfn >= end_pfn)
		panic("alloc_low_page: ran out of memory");
	for (i = 0; temp_mappings[i].allocated; i++) {
		if (!temp_mappings[i].pmd)
			panic("alloc_low_page: ran out of temp mappings");
	}
	ti = &temp_mappings[i];
	paddr = (pfn << PAGE_SHIFT) & PMD_MASK;
	set_pmd(ti->pmd, __pmd(paddr | _KERNPG_TABLE | _PAGE_PSE));
	ti->allocated = 1;
	__flush_tlb();
	adr = ti->address + ((pfn << PAGE_SHIFT) & ~PMD_MASK);
	memset(adr, 0, PAGE_SIZE);
	*index = i;
	*phys = pfn * PAGE_SIZE;
	return adr;
}
static __meminit void unmap_low_page(int i)
{
	struct temp_map *ti;

	if (after_bootmem)
		return;

	ti = &temp_mappings[i];
	set_pmd(ti->pmd, __pmd(0));
	ti->allocated = 0;
}
/* Must run before zap_low_mappings */
__init void *early_ioremap(unsigned long addr, unsigned long size)
{
	unsigned long map = round_down(addr, LARGE_PAGE_SIZE);

	/* actually usually some more */
	if (size >= LARGE_PAGE_SIZE) {
		printk("SMBIOS area too long %lu\n", size);
		return NULL;
	}
	set_pmd(temp_mappings[0].pmd, __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
	map += LARGE_PAGE_SIZE;
	set_pmd(temp_mappings[1].pmd, __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
	__flush_tlb();
	return temp_mappings[0].address + (addr & (LARGE_PAGE_SIZE-1));
}
/* To avoid virtual aliases later */
__init void early_iounmap(void *addr, unsigned long size)
{
	if ((void *)round_down((unsigned long)addr, LARGE_PAGE_SIZE) != temp_mappings[0].address)
		printk("early_iounmap: bad address %p\n", addr);
	set_pmd(temp_mappings[0].pmd, __pmd(0));
	set_pmd(temp_mappings[1].pmd, __pmd(0));
	__flush_tlb();
}
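/* Fill one pmd with 2MB kernel mappings for the physical range
   [address, end); entries past the end of the range are cleared. */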
static void __meminit
phys_pmd_init(pmd_t *pmd, unsigned long address, unsigned long end)
{
	int i;

	for (i = 0; i < PTRS_PER_PMD; pmd++, i++, address += PMD_SIZE) {
		unsigned long entry;

		if (address >= end) {
			for (; i < PTRS_PER_PMD; i++, pmd++)
				set_pmd(pmd, __pmd(0));
			break;
		}
		entry = _PAGE_NX|_PAGE_PSE|_KERNPG_TABLE|_PAGE_GLOBAL|address;
		entry &= __supported_pte_mask;
		set_pmd(pmd, __pmd(entry));
	}
}
static void __meminit
phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
{
	pmd_t *pmd = pmd_offset(pud, (unsigned long)__va(address));

	if (pmd_none(*pmd)) {
		spin_lock(&init_mm.page_table_lock);
		phys_pmd_init(pmd, address, end);
		spin_unlock(&init_mm.page_table_lock);
		__flush_tlb_all();
	}
}
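/* Build the pud entries covering [address, end), allocating and filling a
   pmd page for each 1GB slot that the e820 map says contains memory. */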
static void __meminit phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
{
	long i = pud_index(address);

	pud = pud + i;

	if (after_bootmem && pud_val(*pud)) {
		phys_pmd_update(pud, address, end);
		return;
	}

	for (; i < PTRS_PER_PUD; pud++, i++) {
		int map;
		unsigned long paddr, pmd_phys;
		pmd_t *pmd;

		paddr = (address & PGDIR_MASK) + i*PUD_SIZE;
		if (paddr >= end)
			break;

		if (!after_bootmem && !e820_any_mapped(paddr, paddr+PUD_SIZE, 0)) {
			set_pud(pud, __pud(0));
			continue;
		}

		pmd = alloc_low_page(&map, &pmd_phys);
		spin_lock(&init_mm.page_table_lock);
		set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
		phys_pmd_init(pmd, paddr, end);
		spin_unlock(&init_mm.page_table_lock);
		unmap_low_page(map);
	}
	__flush_tlb();
}
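/* Estimate how much space the kernel direct-mapping page tables will need
   and grab a physical range for them from the e820 map. */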
static void __init find_early_table_space(unsigned long end)
{
	unsigned long puds, pmds, tables, start;

	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
	pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
	tables = round_up(puds * sizeof(pud_t), PAGE_SIZE) +
		 round_up(pmds * sizeof(pmd_t), PAGE_SIZE);

	/* RED-PEN putting page tables only on node 0 could
	   cause a hotspot and fill up ZONE_DMA. The page tables
	   need roughly 0.5KB per GB. */
	start = 0x8000;
	table_start = find_e820_area(start, end, tables);
	if (table_start == -1UL)
		panic("Cannot find space for the kernel page tables");

	table_start >>= PAGE_SHIFT;
	table_end = table_start;

	early_printk("kernel direct mapping tables up to %lx @ %lx-%lx\n",
		end, table_start << PAGE_SHIFT, table_end << PAGE_SHIFT);
}
/* Setup the direct mapping of the physical memory at PAGE_OFFSET.
   This runs before bootmem is initialized and gets pages directly from the
   physical memory. To access them they are temporarily mapped. */
void __meminit init_memory_mapping(unsigned long start, unsigned long end)
{
	unsigned long next;

	Dprintk("init_memory_mapping\n");

	/*
	 * Find space for the kernel direct mapping tables.
	 * Later we should allocate these tables in the local node of the memory
	 * mapped. Unfortunately this is done currently before the nodes are
	 * discovered.
	 */
	if (!after_bootmem)
		find_early_table_space(end);

	start = (unsigned long)__va(start);
	end = (unsigned long)__va(end);

	for (; start < end; start = next) {
		int map;
		unsigned long pud_phys;
		pgd_t *pgd = pgd_offset_k(start);
		pud_t *pud;

		if (after_bootmem)
			pud = pud_offset_k(pgd, start & PGDIR_MASK);
		else
			pud = alloc_low_page(&map, &pud_phys);

		next = start + PGDIR_SIZE;
		if (next > end)
			next = end;
		phys_pud_init(pud, __pa(start), __pa(next));
		if (!after_bootmem)
			set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
		unmap_low_page(map);
	}

	if (!after_bootmem)
		asm volatile("movq %%cr4,%0" : "=r" (mmu_cr4_features));
	__flush_tlb_all();
}
void __cpuinit zap_low_mappings(int cpu)
{
	if (cpu == 0) {
		pgd_t *pgd = pgd_offset_k(0UL);
		pgd_clear(pgd);
	} else {
		/*
		 * For AP's, zap the low identity mappings by changing the cr3
		 * to init_level4_pgt and doing local flush tlb all
		 */
		asm volatile("movq %0,%%cr3" :: "r" (__pa_symbol(&init_level4_pgt)));
	}
	__flush_tlb_all();
}
/* Compute zone sizes for the DMA and DMA32 zones in a node. */
__init void
size_zones(unsigned long *z, unsigned long *h,
	   unsigned long start_pfn, unsigned long end_pfn)
{
	int i;
	unsigned long w;

	for (i = 0; i < MAX_NR_ZONES; i++)
		z[i] = 0;

	if (start_pfn < MAX_DMA_PFN)
		z[ZONE_DMA] = MAX_DMA_PFN - start_pfn;
	if (start_pfn < MAX_DMA32_PFN) {
		unsigned long dma32_pfn = MAX_DMA32_PFN;
		if (dma32_pfn > end_pfn)
			dma32_pfn = end_pfn;
		z[ZONE_DMA32] = dma32_pfn - start_pfn;
	}
	z[ZONE_NORMAL] = end_pfn - start_pfn;

	/* Remove lower zones from higher ones. */
	w = 0;
	for (i = 0; i < MAX_NR_ZONES; i++) {
		if (z[i])
			z[i] -= w;
		w += z[i];
	}

	/* Compute holes */
	w = start_pfn;
	for (i = 0; i < MAX_NR_ZONES; i++) {
		unsigned long s = w;
		w += z[i];
		h[i] = e820_hole_size(s, w);
	}

	/* Add the space needed for mem_map to the holes too. */
	for (i = 0; i < MAX_NR_ZONES; i++)
		h[i] += (z[i] * sizeof(struct page)) / PAGE_SIZE;

	/* The 16MB DMA zone has the kernel and other misc mappings.
	   Account them too */
	if (h[ZONE_DMA]) {
		h[ZONE_DMA] += dma_reserve;
		if (h[ZONE_DMA] >= z[ZONE_DMA]) {
			printk(KERN_WARNING
				"Kernel too large and filling up ZONE_DMA?\n");
			h[ZONE_DMA] = z[ZONE_DMA];
		}
	}
}
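/* Flat (non-NUMA) zone setup: report all of memory as node 0 and size its
   zones with size_zones() above. */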
#ifndef CONFIG_NUMA
void __init paging_init(void)
{
	unsigned long zones[MAX_NR_ZONES], holes[MAX_NR_ZONES];

	memory_present(0, 0, end_pfn);
	sparse_init();
	size_zones(zones, holes, 0, end_pfn);
	free_area_init_node(0, NODE_DATA(0), zones,
			    __pa(PAGE_OFFSET) >> PAGE_SHIFT, holes);
}
#endif
/* Unmap a kernel mapping if it exists. This is useful to avoid prefetches
   from the CPU leading to inconsistent cache lines. address and size
   must be aligned to 2MB boundaries.
   Does nothing when the mapping doesn't exist. */
void __init clear_kernel_mapping(unsigned long address, unsigned long size)
{
	unsigned long end = address + size;

	BUG_ON(address & ~LARGE_PAGE_MASK);
	BUG_ON(size & ~LARGE_PAGE_MASK);

	for (; address < end; address += LARGE_PAGE_SIZE) {
		pgd_t *pgd = pgd_offset_k(address);
		pud_t *pud;
		pmd_t *pmd;
		if (pgd_none(*pgd))
			continue;
		pud = pud_offset(pgd, address);
		if (pud_none(*pud))
			continue;
		pmd = pmd_offset(pud, address);
		if (!pmd || pmd_none(*pmd))
			continue;
		if (0 == (pmd_val(*pmd) & _PAGE_PSE)) {
			/* Could handle this, but it should not happen currently. */
			printk(KERN_ERR
		"clear_kernel_mapping: mapping has been split. will leak memory\n");
			pmd_ERROR(*pmd);
		}
		set_pmd(pmd, __pmd(0));
	}
	__flush_tlb_all();
}
/*
 * Memory hotplug specific functions
 */
#if defined(CONFIG_ACPI_HOTPLUG_MEMORY) || defined(CONFIG_ACPI_HOTPLUG_MEMORY_MODULE)

void online_page(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
	totalram_pages++;
	num_physpages++;
}

#ifndef CONFIG_MEMORY_HOTPLUG
/*
 * Memory Hotadd without sparsemem. The mem_maps have been allocated in advance,
 * just online the pages.
 */
int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages)
{
	int err = -EIO;
	unsigned long pfn;
	unsigned long total = 0, mem = 0;
	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
		if (pfn_valid(pfn)) {
			online_page(pfn_to_page(pfn));
			err = 0;
			mem++;
		}
		total++;
	}
	if (!err) {
		z->spanned_pages += total;
		z->present_pages += mem;
		z->zone_pgdat->node_spanned_pages += total;
		z->zone_pgdat->node_present_pages += mem;
	}
	return err;
}
#endif
/*
 * Memory is added always to NORMAL zone. This means you will never get
 * additional DMA/DMA32 memory.
 */
int add_memory(u64 start, u64 size)
{
	struct pglist_data *pgdat = NODE_DATA(0);
	struct zone *zone = pgdat->node_zones + MAX_NR_ZONES-2;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	ret = __add_pages(zone, start_pfn, nr_pages);
	if (ret)
		goto error;

	init_memory_mapping(start, (start + size -1));

	return ret;
error:
	printk("%s: Problem encountered in __add_pages!\n", __func__);
	return ret;
}
EXPORT_SYMBOL_GPL(add_memory);

int remove_memory(u64 start, u64 size)
{
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(remove_memory);

#endif
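/* Late memory init: hand the bootmem pages over to the buddy allocator,
   account reserved pages, and register the /proc/kcore regions. */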
static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
			 kcore_vsyscall;

void __init mem_init(void)
{
	long codesize, reservedpages, datasize, initsize;

#ifdef CONFIG_SWIOTLB
	pci_swiotlb_init();
#endif
	no_iommu_init();

	/* How many end-of-memory variables you have, grandma! */
	max_low_pfn = end_pfn;
	max_pfn = end_pfn;
	num_physpages = end_pfn;
	high_memory = (void *) __va(end_pfn * PAGE_SIZE);

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);

	reservedpages = 0;

	/* this will put all low memory onto the freelists */
#ifdef CONFIG_NUMA
	totalram_pages = numa_free_all_bootmem();
#else
	totalram_pages = free_all_bootmem();
#endif
	reservedpages = end_pfn - totalram_pages - e820_hole_size(0, end_pfn);

	after_bootmem = 1;

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	/* Register memory areas for /proc/kcore */
	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END-VMALLOC_START);
	kclist_add(&kcore_kernel, &_stext, _end - _stext);
	kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN);
	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
		   VSYSCALL_END - VSYSCALL_START);

	printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		end_pfn << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10);

#ifdef CONFIG_SMP
	/*
	 * Sync boot_level4_pgt mappings with the init_level4_pgt
	 * except for the low identity mappings which are already zapped
	 * in init_level4_pgt. This sync-up is essential for AP's bringup
	 */
	memcpy(boot_level4_pgt+1, init_level4_pgt+1, (PTRS_PER_PGD-1)*sizeof(pgd_t));
#endif
}
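/* Free the memory occupied by __init code and data, poisoning it first so
   that any late reference shows up as a crash rather than silent reuse. */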
void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		memset((void *)(addr & ~(PAGE_SIZE-1)), 0xcc, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
	memset(__initdata_begin, 0xba, __initdata_end - __initdata_begin);
	printk ("Freeing unused kernel memory: %luk freed\n", (__init_end - __init_begin) >> 10);
}
#ifdef CONFIG_DEBUG_RODATA

extern char __start_rodata, __end_rodata;
void mark_rodata_ro(void)
{
	unsigned long addr = (unsigned long)&__start_rodata;

	for (; addr < (unsigned long)&__end_rodata; addr += PAGE_SIZE)
		change_page_attr_addr(addr, 1, PAGE_KERNEL_RO);

	printk ("Write protecting the kernel read-only data: %luk\n",
			(&__end_rodata - &__start_rodata) >> 10);

	/*
	 * change_page_attr_addr() requires a global_flush_tlb() call after it.
	 * We do this after the printk so that if something went wrong in the
	 * change, the printk gets out at least to give a better debug hint
	 * of who is the culprit.
	 */
	global_flush_tlb();
}
#endif
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start >= end)
		return;
	printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		init_page_count(virt_to_page(start));
		free_page(start);
		totalram_pages++;
	}
}
#endif
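/* Reserve an early boot memory range and, if it lies below the 16MB DMA
   limit, account it in dma_reserve so size_zones() can subtract it later. */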
void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
{
	/* Should check here against the e820 map to avoid double free */
#ifdef CONFIG_NUMA
	int nid = phys_to_nid(phys);
	reserve_bootmem_node(NODE_DATA(nid), phys, len);
#else
	reserve_bootmem(phys, len);
#endif
	if (phys+len <= MAX_DMA_PFN*PAGE_SIZE)
		dma_reserve += len / PAGE_SIZE;
}
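/* Walk the kernel page tables to check whether a kernel virtual address is
   backed by a present mapping. */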
int kern_addr_valid(unsigned long addr)
{
	unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (above != 0 && above != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;
	if (pmd_large(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;
	return pfn_valid(pte_pfn(*pte));
}
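/* Make the exception_trace flag tunable at runtime via
   /proc/sys/debug/exception-trace. */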
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>

extern int exception_trace, page_fault_trace;

static ctl_table debug_table2[] = {
	{ 99, "exception-trace", &exception_trace, sizeof(int), 0644, NULL,
	  proc_dointvec },
	{ 0, }
};

static ctl_table debug_root_table2[] = {
	{ .ctl_name = CTL_DEBUG, .procname = "debug", .mode = 0555,
	  .child = debug_table2 },
	{ 0 },
};

static __init int x8664_sysctl_init(void)
{
	register_sysctl_table(debug_root_table2, 1);
	return 0;
}
__initcall(x8664_sysctl_init);
#endif
/* A pseudo VMA to allow ptrace access for the vsyscall page. This only
   covers the 64bit vsyscall page now. 32bit has a real VMA now and does
   not need special handling anymore. */

static struct vm_area_struct gate_vma = {
	.vm_start = VSYSCALL_START,
	.vm_end = VSYSCALL_END,
	.vm_page_prot = PAGE_READONLY
};

struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
#ifdef CONFIG_IA32_EMULATION
	if (test_tsk_thread_flag(tsk, TIF_IA32))
		return NULL;
#endif
	return &gate_vma;
}

int in_gate_area(struct task_struct *task, unsigned long addr)
{
	struct vm_area_struct *vma = get_gate_vma(task);
	if (!vma)
		return 0;
	return (addr >= vma->vm_start) && (addr < vma->vm_end);
}

/* Use this when you have no reliable task/vma, typically from interrupt
 * context. It is less reliable than using the task's vma and may give
 * false positives.
 */
int in_gate_area_no_task(unsigned long addr)
{
	return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
}