[PATCH] x86-64: Use e820_find_hole to compute reserved pages
[linux-2.6/verdex.git] arch/x86_64/mm/init.c
/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */
#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#ifndef Dprintk
#define Dprintk(x...)
#endif

#ifdef CONFIG_GART_IOMMU
extern int swiotlb;
#endif

extern char _stext[];

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
/*
 * NOTE: pagetable_init allocates all the fixmap pagetables contiguously in
 * physical space, so we can cache the location of the first one and move
 * around without checking the pgd every time.
 */
void show_mem(void)
{
	int i, total = 0, reserved = 0;
	int shared = 0, cached = 0;
	pg_data_t *pgdat;
	struct page *page;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));

	for_each_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
			page = pfn_to_page(pgdat->node_start_pfn + i);
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
	}
	printk("%d pages of RAM\n", total);
	printk("%d reserved pages\n", reserved);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
}
/* References to section boundaries */

extern char _text, _etext, _edata, __bss_start, _end[];
extern char __init_begin, __init_end;

int after_bootmem;
static void *spp_getpage(void)
{
	void *ptr;
	if (after_bootmem)
		ptr = (void *) get_zeroed_page(GFP_ATOMIC);
	else
		ptr = alloc_bootmem_pages(PAGE_SIZE);
	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK))
		panic("set_pte_phys: cannot allocate page data %s\n", after_bootmem ? "after bootmem" : "");

	Dprintk("spp_getpage %p\n", ptr);
	return ptr;
}
static void set_pte_phys(unsigned long vaddr,
			 unsigned long phys, pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte, new_pte;

	Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);

	pgd = pgd_offset_k(vaddr);
	if (pgd_none(*pgd)) {
		printk("PGD FIXMAP MISSING, it should be setup in head.S!\n");
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		pmd = (pmd_t *) spp_getpage();
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
		if (pmd != pmd_offset(pud, 0)) {
			printk("PAGETABLE BUG #01! %p <-> %p\n", pmd, pmd_offset(pud, 0));
			return;
		}
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		pte = (pte_t *) spp_getpage();
		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
		if (pte != pte_offset_kernel(pmd, 0)) {
			printk("PAGETABLE BUG #02!\n");
			return;
		}
	}
	new_pte = pfn_pte(phys >> PAGE_SHIFT, prot);

	pte = pte_offset_kernel(pmd, vaddr);
	if (!pte_none(*pte) &&
	    pte_val(*pte) != (pte_val(new_pte) & __supported_pte_mask))
		pte_ERROR(*pte);
	set_pte(pte, new_pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}
/* NOTE: this is meant to be run only at boot */
void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		printk("Invalid __set_fixmap\n");
		return;
	}
	set_pte_phys(address, phys, prot);
}
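/*
 * Illustration only (not part of this file): a typical boot-time caller
 * passes a fixmap index, a physical address and the protection bits, e.g.
 * the local APIC setup code mapping its register page uncached:
 *
 *	__set_fixmap(FIX_APIC_BASE, mp_lapic_addr, PAGE_KERNEL_NOCACHE);
 *
 * FIX_APIC_BASE, mp_lapic_addr and PAGE_KERNEL_NOCACHE are defined elsewhere
 * in the tree; this sketch only shows the calling convention.
 */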
unsigned long __initdata table_start, table_end;

extern pmd_t temp_boot_pmds[];

static struct temp_map {
	pmd_t *pmd;
	void  *address;
	int    allocated;
} temp_mappings[] __initdata = {
	{ &temp_boot_pmds[0], (void *)(40UL * 1024 * 1024) },
	{ &temp_boot_pmds[1], (void *)(42UL * 1024 * 1024) },
	{}
};
static __init void *alloc_low_page(int *index, unsigned long *phys)
{
	struct temp_map *ti;
	int i;
	unsigned long pfn = table_end++, paddr;
	void *adr;

	if (pfn >= end_pfn)
		panic("alloc_low_page: ran out of memory");
	for (i = 0; temp_mappings[i].allocated; i++) {
		if (!temp_mappings[i].pmd)
			panic("alloc_low_page: ran out of temp mappings");
	}
	ti = &temp_mappings[i];
	paddr = (pfn << PAGE_SHIFT) & PMD_MASK;
	set_pmd(ti->pmd, __pmd(paddr | _KERNPG_TABLE | _PAGE_PSE));
	ti->allocated = 1;
	__flush_tlb();
	adr = ti->address + ((pfn << PAGE_SHIFT) & ~PMD_MASK);
	*index = i;
	*phys = pfn * PAGE_SIZE;
	return adr;
}
static __init void unmap_low_page(int i)
{
	struct temp_map *ti = &temp_mappings[i];
	set_pmd(ti->pmd, __pmd(0));
	ti->allocated = 0;
}
static void __init phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
{
	long i, j;

	i = pud_index(address);
	pud = pud + i;
	for (; i < PTRS_PER_PUD; pud++, i++) {
		int map;
		unsigned long paddr, pmd_phys;
		pmd_t *pmd;

		paddr = address + i*PUD_SIZE;
		if (paddr >= end) {
			for (; i < PTRS_PER_PUD; i++, pud++)
				set_pud(pud, __pud(0));
			break;
		}

		if (!e820_mapped(paddr, paddr+PUD_SIZE, 0)) {
			set_pud(pud, __pud(0));
			continue;
		}

		pmd = alloc_low_page(&map, &pmd_phys);
		set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
		for (j = 0; j < PTRS_PER_PMD; pmd++, j++, paddr += PMD_SIZE) {
			unsigned long pe;

			if (paddr >= end) {
				for (; j < PTRS_PER_PMD; j++, pmd++)
					set_pmd(pmd, __pmd(0));
				break;
			}
			pe = _PAGE_NX|_PAGE_PSE | _KERNPG_TABLE | _PAGE_GLOBAL | paddr;
			pe &= __supported_pte_mask;
			set_pmd(pmd, __pmd(pe));
		}
		unmap_low_page(map);
	}
	__flush_tlb();
}
static void __init find_early_table_space(unsigned long end)
{
	unsigned long puds, pmds, tables;

	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
	pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
	tables = round_up(puds * sizeof(pud_t), PAGE_SIZE) +
		 round_up(pmds * sizeof(pmd_t), PAGE_SIZE);

	table_start = find_e820_area(0x8000, __pa_symbol(&_text), tables);
	if (table_start == -1UL)
		panic("Cannot find space for the kernel page tables");

	table_start >>= PAGE_SHIFT;
	table_end = table_start;
}
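/*
 * Illustrative arithmetic (not taken from this file): for end = 4 GiB the
 * worst case is 4 pud entries (1 GiB each) and 2048 pmd entries (2 MiB each),
 * so with 8-byte entries
 *
 *	tables = round_up(4 * 8, 4096) + round_up(2048 * 8, 4096)
 *	       = 4096 + 16384 = 20 KiB
 *
 * i.e. five 4 KiB pages are set aside for the early direct-mapping tables.
 */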
/* Setup the direct mapping of the physical memory at PAGE_OFFSET.
   This runs before bootmem is initialized and gets pages directly from the
   physical memory. To access them they are temporarily mapped. */
void __init init_memory_mapping(unsigned long start, unsigned long end)
{
	unsigned long next;

	Dprintk("init_memory_mapping\n");

	/*
	 * Find space for the kernel direct mapping tables.
	 * Later we should allocate these tables in the local node of the memory
	 * mapped. Unfortunately this is done currently before the nodes are
	 * discovered.
	 */
	find_early_table_space(end);

	start = (unsigned long)__va(start);
	end = (unsigned long)__va(end);

	for (; start < end; start = next) {
		int map;
		unsigned long pud_phys;
		pud_t *pud = alloc_low_page(&map, &pud_phys);
		next = start + PGDIR_SIZE;
		if (next > end)
			next = end;
		phys_pud_init(pud, __pa(start), __pa(next));
		set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
		unmap_low_page(map);
	}

	asm volatile("movq %%cr4,%0" : "=r" (mmu_cr4_features));
	__flush_tlb_all();
	early_printk("kernel direct mapping tables upto %lx @ %lx-%lx\n", end,
		     table_start<<PAGE_SHIFT,
		     table_end<<PAGE_SHIFT);
}
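/*
 * Illustration only (assumed caller, not defined here): early setup code
 * would map all of RAM with a single call, roughly
 *
 *	init_memory_mapping(0, end_pfn << PAGE_SHIFT);
 *
 * and the loop above then covers that range one PGDIR_SIZE (512 GiB) chunk
 * of virtual address space at a time.
 */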
extern struct x8664_pda cpu_pda[NR_CPUS];

/* Assumes all CPUs still execute in init_mm */
void zap_low_mappings(void)
{
	pgd_t *pgd = pgd_offset_k(0UL);
	pgd_clear(pgd);
	flush_tlb_all();
}
#ifndef CONFIG_NUMA
void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES];
	unsigned long holes[MAX_NR_ZONES];
	unsigned int max_dma;

	memset(zones_size, 0, sizeof(zones_size));
	memset(holes, 0, sizeof(holes));

	max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;

	if (end_pfn < max_dma) {
		zones_size[ZONE_DMA] = end_pfn;
		holes[ZONE_DMA] = e820_hole_size(0, end_pfn);
	} else {
		zones_size[ZONE_DMA] = max_dma;
		holes[ZONE_DMA] = e820_hole_size(0, max_dma);
		zones_size[ZONE_NORMAL] = end_pfn - max_dma;
		holes[ZONE_NORMAL] = e820_hole_size(max_dma, end_pfn);
	}
	free_area_init_node(0, NODE_DATA(0), zones_size,
			    __pa(PAGE_OFFSET) >> PAGE_SHIFT, holes);
	return;
}
#endif
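/*
 * Illustrative numbers (not taken from this file): with 4 KiB pages and
 * MAX_DMA_ADDRESS at PAGE_OFFSET + 16 MiB, max_dma is 0x1000.  A box with
 * 1 GiB of RAM has end_pfn = 0x40000, so ZONE_DMA spans pfns [0, 0x1000)
 * and ZONE_NORMAL spans [0x1000, 0x40000), each shrunk by whatever
 * e820_hole_size() reports for holes in that pfn range.
 */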
/* Unmap a kernel mapping if it exists. This is useful to avoid prefetches
   from the CPU leading to inconsistent cache lines. address and size
   must be aligned to 2MB boundaries.
   Does nothing when the mapping doesn't exist. */
void __init clear_kernel_mapping(unsigned long address, unsigned long size)
{
	unsigned long end = address + size;

	BUG_ON(address & ~LARGE_PAGE_MASK);
	BUG_ON(size & ~LARGE_PAGE_MASK);

	for (; address < end; address += LARGE_PAGE_SIZE) {
		pgd_t *pgd = pgd_offset_k(address);
		pud_t *pud;
		pmd_t *pmd;
		if (pgd_none(*pgd))
			continue;
		pud = pud_offset(pgd, address);
		if (pud_none(*pud))
			continue;
		pmd = pmd_offset(pud, address);
		if (!pmd || pmd_none(*pmd))
			continue;
		if (0 == (pmd_val(*pmd) & _PAGE_PSE)) {
			/* Could handle this, but it should not happen currently. */
			printk(KERN_ERR
			       "clear_kernel_mapping: mapping has been split. will leak memory\n");
			pmd_ERROR(*pmd);
		}
		set_pmd(pmd, __pmd(0));
	}
	__flush_tlb_all();
}
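/*
 * Illustration only (assumed caller, not defined here): the GART IOMMU code
 * unmaps its aperture from the kernel direct mapping with something like
 *
 *	clear_kernel_mapping((unsigned long)__va(iommu_bus_base), iommu_size);
 *
 * where both the base and the size are multiples of 2 MiB, as required by
 * the BUG_ON checks above.
 */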
static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
			 kcore_vsyscall;

void __init mem_init(void)
{
	long codesize, reservedpages, datasize, initsize;

#ifdef CONFIG_SWIOTLB
	if (!iommu_aperture &&
	    (end_pfn >= 0xffffffff>>PAGE_SHIFT || force_iommu))
		swiotlb = 1;
	if (swiotlb)
		swiotlb_init();
#endif

	/* How many end-of-memory variables you have, grandma! */
	max_low_pfn = end_pfn;
	max_pfn = end_pfn;
	num_physpages = end_pfn;
	high_memory = (void *) __va(end_pfn * PAGE_SIZE);

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);

	reservedpages = 0;

	/* this will put all low memory onto the freelists */
#ifdef CONFIG_NUMA
	totalram_pages = numa_free_all_bootmem();
#else
#ifdef CONFIG_FLATMEM
	max_mapnr = end_pfn;
	if (!mem_map) BUG();
#endif
	totalram_pages = free_all_bootmem();
#endif
	/* Pages below end_pfn that were not handed to the allocator and are
	   not part of an e820 hole are accounted as reserved. */
	reservedpages = end_pfn - totalram_pages - e820_hole_size(0, end_pfn);
	after_bootmem = 1;

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	/* Register memory areas for /proc/kcore */
	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END-VMALLOC_START);
	kclist_add(&kcore_kernel, &_stext, _end - _stext);
	kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN);
	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
		   VSYSCALL_END - VSYSCALL_START);

	printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
	       (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
	       end_pfn << (PAGE_SHIFT-10),
	       codesize >> 10,
	       reservedpages << (PAGE_SHIFT-10),
	       datasize >> 10,
	       initsize >> 10);

	/*
	 * Subtle. SMP is doing its boot stuff late (because it has to
	 * fork idle threads) - but it also needs low mappings for the
	 * protected-mode entry to work. We zap these entries only after
	 * the WP-bit has been tested.
	 */
#ifndef CONFIG_SMP
	zap_low_mappings();
#endif
}
extern char __initdata_begin[], __initdata_end[];

void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		set_page_count(virt_to_page(addr), 1);
		memset((void *)(addr & ~(PAGE_SIZE-1)), 0xcc, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
	memset(__initdata_begin, 0xba, __initdata_end - __initdata_begin);
	printk("Freeing unused kernel memory: %luk freed\n", (&__init_end - &__init_begin) >> 10);
}
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start < (unsigned long)&_end)
		return;
	printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		set_page_count(virt_to_page(start), 1);
		free_page(start);
		totalram_pages++;
	}
}
#endif
void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
{
	/* Should check here against the e820 map to avoid double free */
#ifdef CONFIG_NUMA
	int nid = phys_to_nid(phys);
	reserve_bootmem_node(NODE_DATA(nid), phys, len);
#else
	reserve_bootmem(phys, len);
#endif
}
int kern_addr_valid(unsigned long addr)
{
	unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (above != 0 && above != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;
	if (pmd_large(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;
	return pfn_valid(pte_pfn(*pte));
}
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>

extern int exception_trace, page_fault_trace;

static ctl_table debug_table2[] = {
	{ 99, "exception-trace", &exception_trace, sizeof(int), 0644, NULL,
	  proc_dointvec },
#ifdef CONFIG_CHECKING
	{ 100, "page-fault-trace", &page_fault_trace, sizeof(int), 0644, NULL,
	  proc_dointvec },
#endif
	{ 0, }
};

static ctl_table debug_root_table2[] = {
	{ .ctl_name = CTL_DEBUG, .procname = "debug", .mode = 0555,
	  .child = debug_table2 },
	{ 0 },
};

static __init int x8664_sysctl_init(void)
{
	register_sysctl_table(debug_root_table2, 1);
	return 0;
}
__initcall(x8664_sysctl_init);
#endif
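/*
 * Illustration only: with CONFIG_SYSCTL enabled the tables above appear
 * under /proc/sys/debug, so the exception trace can be toggled from
 * userspace with e.g.
 *
 *	echo 1 > /proc/sys/debug/exception-trace
 */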
/* A pseudo VMA to allow ptrace access to the vsyscall page. This only
   covers the 64-bit vsyscall page now. 32-bit has a real VMA now and does
   not need special handling anymore. */
static struct vm_area_struct gate_vma = {
	.vm_start = VSYSCALL_START,
	.vm_end = VSYSCALL_END,
	.vm_page_prot = PAGE_READONLY
};

struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
#ifdef CONFIG_IA32_EMULATION
	if (test_tsk_thread_flag(tsk, TIF_IA32))
		return NULL;
#endif
	return &gate_vma;
}

int in_gate_area(struct task_struct *task, unsigned long addr)
{
	struct vm_area_struct *vma = get_gate_vma(task);
	if (!vma)
		return 0;
	return (addr >= vma->vm_start) && (addr < vma->vm_end);
}

/* Use this when you have no reliable task/vma, typically from interrupt
 * context. It is less reliable than using the task's vma and may give
 * false positives.
 */
int in_gate_area_no_task(unsigned long addr)
{
	return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
}