Linux v2.6.13-rc3: arch/x86_64/mm/init.c
/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>

#ifndef Dprintk
#define Dprintk(x...)
#endif

#ifdef CONFIG_GART_IOMMU
extern int swiotlb;
#endif

extern char _stext[];

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * NOTE: pagetable_init allocates all the fixmap page tables contiguously
 * in physical space, so we can cache the location of the first one and
 * move around without checking the pgd every time.
 */

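/*
 * Walk every page of every node and print a summary of total, reserved,
 * shared and swap-cached pages (also reachable via the SysRq-m handler).
 */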
void show_mem(void)
{
        int i, total = 0, reserved = 0;
        int shared = 0, cached = 0;
        pg_data_t *pgdat;
        struct page *page;

        printk("Mem-info:\n");
        show_free_areas();
        printk("Free swap: %6ldkB\n", nr_swap_pages << (PAGE_SHIFT-10));

        for_each_pgdat(pgdat) {
                for (i = 0; i < pgdat->node_spanned_pages; ++i) {
                        page = pfn_to_page(pgdat->node_start_pfn + i);
                        total++;
                        if (PageReserved(page))
                                reserved++;
                        else if (PageSwapCache(page))
                                cached++;
                        else if (page_count(page))
                                shared += page_count(page) - 1;
                }
        }
        printk("%d pages of RAM\n", total);
        printk("%d reserved pages\n", reserved);
        printk("%d pages shared\n", shared);
        printk("%d pages swap cached\n", cached);
}

/* References to section boundaries */

extern char _text, _etext, _edata, __bss_start, _end[];
extern char __init_begin, __init_end;

int after_bootmem;

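/*
 * Get a zeroed page for an intermediate page table: from the bootmem
 * allocator while it is still alive, from the page allocator afterwards.
 */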
static void *spp_getpage(void)
{
        void *ptr;

        if (after_bootmem)
                ptr = (void *) get_zeroed_page(GFP_ATOMIC);
        else
                ptr = alloc_bootmem_pages(PAGE_SIZE);
        if (!ptr || ((unsigned long)ptr & ~PAGE_MASK))
                panic("set_pte_phys: cannot allocate page data %s\n",
                      after_bootmem ? "after bootmem" : "");

        Dprintk("spp_getpage %p\n", ptr);
        return ptr;
}

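/*
 * Install a single kernel mapping of vaddr to phys, allocating any missing
 * intermediate page-table levels with spp_getpage(). Used below to populate
 * the fixmap range.
 */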
static void set_pte_phys(unsigned long vaddr,
                         unsigned long phys, pgprot_t prot)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte, new_pte;

        Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);

        pgd = pgd_offset_k(vaddr);
        if (pgd_none(*pgd)) {
                printk("PGD FIXMAP MISSING, it should be setup in head.S!\n");
                return;
        }
        pud = pud_offset(pgd, vaddr);
        if (pud_none(*pud)) {
                pmd = (pmd_t *) spp_getpage();
                set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
                if (pmd != pmd_offset(pud, 0)) {
                        printk("PAGETABLE BUG #01! %p <-> %p\n",
                               pmd, pmd_offset(pud, 0));
                        return;
                }
        }
        pmd = pmd_offset(pud, vaddr);
        if (pmd_none(*pmd)) {
                pte = (pte_t *) spp_getpage();
                set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
                if (pte != pte_offset_kernel(pmd, 0)) {
                        printk("PAGETABLE BUG #02!\n");
                        return;
                }
        }
        new_pte = pfn_pte(phys >> PAGE_SHIFT, prot);

        pte = pte_offset_kernel(pmd, vaddr);
        if (!pte_none(*pte) &&
            pte_val(*pte) != (pte_val(new_pte) & __supported_pte_mask))
                pte_ERROR(*pte);
        set_pte(pte, new_pte);

        /*
         * It's enough to flush this one mapping.
         * (PGE mappings get flushed as well.)
         */
        __flush_tlb_one(vaddr);
}

/* NOTE: this is meant to be run only at boot */
void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
        unsigned long address = __fix_to_virt(idx);

        if (idx >= __end_of_fixed_addresses) {
                printk("Invalid __set_fixmap\n");
                return;
        }
        set_pte_phys(address, phys, prot);
}

unsigned long __initdata table_start, table_end;

extern pmd_t temp_boot_pmds[];

static struct temp_map {
        pmd_t *pmd;
        void *address;
        int allocated;
} temp_mappings[] __initdata = {
        { &temp_boot_pmds[0], (void *)(40UL * 1024 * 1024) },
        { &temp_boot_pmds[1], (void *)(42UL * 1024 * 1024) },
        {}      /* terminator */
};

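/*
 * The direct-mapping page tables are built before the direct mapping
 * exists, so freshly allocated table pages cannot be reached through
 * __va(). They are instead mapped through one of the temporary boot
 * PMD windows above and accessed through that window.
 */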
static __init void *alloc_low_page(int *index, unsigned long *phys)
{
        struct temp_map *ti;
        int i;
        unsigned long pfn = table_end++, paddr;
        void *adr;

        if (pfn >= end_pfn)
                panic("alloc_low_page: ran out of memory");
        for (i = 0; temp_mappings[i].allocated; i++) {
                if (!temp_mappings[i].pmd)
                        panic("alloc_low_page: ran out of temp mappings");
        }
        ti = &temp_mappings[i];
        paddr = (pfn << PAGE_SHIFT) & PMD_MASK;
        set_pmd(ti->pmd, __pmd(paddr | _KERNPG_TABLE | _PAGE_PSE));
        ti->allocated = 1;
        __flush_tlb();
        adr = ti->address + ((pfn << PAGE_SHIFT) & ~PMD_MASK);
        *index = i;
        *phys = pfn * PAGE_SIZE;
        return adr;
}

static __init void unmap_low_page(int i)
{
        struct temp_map *ti = &temp_mappings[i];

        set_pmd(ti->pmd, __pmd(0));
        ti->allocated = 0;
}

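/*
 * Fill one PUD's worth of the direct mapping with 2MB pages, clearing
 * entries for ranges the e820 map does not cover.
 */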
static void __init phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
{
        long i, j;

        i = pud_index(address);
        pud = pud + i;
        for (; i < PTRS_PER_PUD; pud++, i++) {
                int map;
                unsigned long paddr, pmd_phys;
                pmd_t *pmd;

                paddr = address + i*PUD_SIZE;
                if (paddr >= end) {
                        for (; i < PTRS_PER_PUD; i++, pud++)
                                set_pud(pud, __pud(0));
                        break;
                }

                if (!e820_mapped(paddr, paddr+PUD_SIZE, 0)) {
                        set_pud(pud, __pud(0));
                        continue;
                }

                pmd = alloc_low_page(&map, &pmd_phys);
                set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
                for (j = 0; j < PTRS_PER_PMD; pmd++, j++, paddr += PMD_SIZE) {
                        unsigned long pe;

                        if (paddr >= end) {
                                for (; j < PTRS_PER_PMD; j++, pmd++)
                                        set_pmd(pmd, __pmd(0));
                                break;
                        }
                        pe = _PAGE_NX | _PAGE_PSE | _KERNPG_TABLE | _PAGE_GLOBAL | paddr;
                        pe &= __supported_pte_mask;
                        set_pmd(pmd, __pmd(pe));
                }
                unmap_low_page(map);
        }
        __flush_tlb();
}

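/*
 * Estimate the space the direct-mapping page tables will need and
 * reserve a contiguous area for them from the e820 map.
 */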
static void __init find_early_table_space(unsigned long end)
{
        unsigned long puds, pmds, tables;

        puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
        pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
        tables = round_up(puds * sizeof(pud_t), PAGE_SIZE) +
                 round_up(pmds * sizeof(pmd_t), PAGE_SIZE);

        table_start = find_e820_area(0x8000, __pa_symbol(&_text), tables);
        if (table_start == -1UL)
                panic("Cannot find space for the kernel page tables");

        table_start >>= PAGE_SHIFT;
        table_end = table_start;
}

/* Set up the direct mapping of the physical memory at PAGE_OFFSET.
   This runs before bootmem is initialized and gets pages directly from
   the physical memory. To access them they are temporarily mapped. */
void __init init_memory_mapping(unsigned long start, unsigned long end)
{
        unsigned long next;

        Dprintk("init_memory_mapping\n");

        /*
         * Find space for the kernel direct mapping tables.
         * Later we should allocate these tables in the local node of the
         * memory mapped. Unfortunately this is currently done before the
         * nodes are discovered.
         */
        find_early_table_space(end);

        start = (unsigned long)__va(start);
        end = (unsigned long)__va(end);

        for (; start < end; start = next) {
                int map;
                unsigned long pud_phys;
                pud_t *pud = alloc_low_page(&map, &pud_phys);

                next = start + PGDIR_SIZE;
                if (next > end)
                        next = end;
                phys_pud_init(pud, __pa(start), __pa(next));
                set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
                unmap_low_page(map);
        }

        asm volatile("movq %%cr4,%0" : "=r" (mmu_cr4_features));
        __flush_tlb_all();
        early_printk("kernel direct mapping tables up to %lx @ %lx-%lx\n", end,
                     table_start << PAGE_SHIFT,
                     table_end << PAGE_SHIFT);
}

extern struct x8664_pda cpu_pda[NR_CPUS];

/* Assumes all CPUs still execute in init_mm */
void zap_low_mappings(void)
{
        pgd_t *pgd = pgd_offset_k(0UL);

        pgd_clear(pgd);
        flush_tlb_all();
}

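/*
 * Non-NUMA zone sizing: everything below MAX_DMA_ADDRESS goes into
 * ZONE_DMA, the remainder into ZONE_NORMAL.
 */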
#ifndef CONFIG_NUMA
void __init paging_init(void)
{
        unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
        unsigned int max_dma;

        max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;

        if (end_pfn < max_dma)
                zones_size[ZONE_DMA] = end_pfn;
        else {
                zones_size[ZONE_DMA] = max_dma;
                zones_size[ZONE_NORMAL] = end_pfn - max_dma;
        }
        free_area_init(zones_size);
}
#endif

/* Unmap a kernel mapping if it exists. This is useful to avoid
   prefetches from the CPU leading to inconsistent cache lines.
   address and size must be aligned to 2MB boundaries.
   Does nothing when the mapping doesn't exist. */
void __init clear_kernel_mapping(unsigned long address, unsigned long size)
{
        unsigned long end = address + size;

        BUG_ON(address & ~LARGE_PAGE_MASK);
        BUG_ON(size & ~LARGE_PAGE_MASK);

        for (; address < end; address += LARGE_PAGE_SIZE) {
                pgd_t *pgd = pgd_offset_k(address);
                pud_t *pud;
                pmd_t *pmd;

                if (pgd_none(*pgd))
                        continue;
                pud = pud_offset(pgd, address);
                if (pud_none(*pud))
                        continue;
                pmd = pmd_offset(pud, address);
                if (!pmd || pmd_none(*pmd))
                        continue;
                if (!(pmd_val(*pmd) & _PAGE_PSE)) {
                        /* Could handle this, but it should not happen currently. */
                        printk(KERN_ERR
                               "clear_kernel_mapping: mapping has been split. will leak memory\n");
                        pmd_ERROR(*pmd);
                }
                set_pmd(pmd, __pmd(0));
        }
        __flush_tlb_all();
}

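/* Use the e820 map to decide whether a pfn lies in usable RAM. */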
static inline int page_is_ram(unsigned long pagenr)
{
        int i;

        for (i = 0; i < e820.nr_map; i++) {
                unsigned long addr, end;

                if (e820.map[i].type != E820_RAM)       /* not usable memory */
                        continue;
                /*
                 * !!!FIXME!!! Some BIOSen report areas as RAM that
                 * are not. Notably the 640KB->1MB area. We need a sanity
                 * check here.
                 */
                addr = (e820.map[i].addr + PAGE_SIZE - 1) >> PAGE_SHIFT;
                end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;
                if ((pagenr >= addr) && (pagenr < end))
                        return 1;
        }
        return 0;
}

extern int swiotlb_force;

static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
                         kcore_vsyscall;

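/*
 * Final memory setup: decide whether the software IOTLB (swiotlb) is
 * needed, release the bootmem pages to the page allocator, register
 * the /proc/kcore areas and print the memory banner.
 */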
void __init mem_init(void)
{
        int codesize, reservedpages, datasize, initsize;
        int tmp;

#ifdef CONFIG_SWIOTLB
        if (swiotlb_force)
                swiotlb = 1;
        if (!iommu_aperture &&
            (end_pfn >= 0xffffffff >> PAGE_SHIFT || force_iommu))
                swiotlb = 1;
        if (swiotlb)
                swiotlb_init();
#endif

        /* How many end-of-memory variables you have, grandma! */
        max_low_pfn = end_pfn;
        max_pfn = end_pfn;
        num_physpages = end_pfn;
        high_memory = (void *) __va(end_pfn * PAGE_SIZE);

        /* clear the zero-page */
        memset(empty_zero_page, 0, PAGE_SIZE);

        reservedpages = 0;

        /* this will put all low memory onto the freelists */
#ifdef CONFIG_NUMA
        totalram_pages += numa_free_all_bootmem();
        tmp = 0;
        /* should count reserved pages here for all nodes */
#else

#ifdef CONFIG_FLATMEM
        max_mapnr = end_pfn;
        if (!mem_map)
                BUG();
#endif

        totalram_pages += free_all_bootmem();

        for (tmp = 0; tmp < end_pfn; tmp++)
                /*
                 * Only count reserved RAM pages
                 */
                if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
                        reservedpages++;
#endif

        after_bootmem = 1;

        codesize = (unsigned long) &_etext - (unsigned long) &_text;
        datasize = (unsigned long) &_edata - (unsigned long) &_etext;
        initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

        /* Register memory areas for /proc/kcore */
        kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
        kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
                   VMALLOC_END - VMALLOC_START);
        kclist_add(&kcore_kernel, &_stext, _end - _stext);
        kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN);
        kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
                   VSYSCALL_END - VSYSCALL_START);

        printk("Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init)\n",
               (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
               end_pfn << (PAGE_SHIFT-10),
               codesize >> 10,
               reservedpages << (PAGE_SHIFT-10),
               datasize >> 10,
               initsize >> 10);

        /*
         * Subtle. SMP is doing its boot stuff late (because it has to
         * fork idle threads) - but it also needs low mappings for the
         * protected-mode entry to work. We zap these entries only after
         * the WP-bit has been tested.
         */
#ifndef CONFIG_SMP
        zap_low_mappings();
#endif
}

extern char __initdata_begin[], __initdata_end[];

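/*
 * Free the pages holding __init code and data, poisoning them (0xcc is
 * the int3 opcode) so that stale references trap instead of working by
 * accident.
 */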
void free_initmem(void)
{
        unsigned long addr;

        addr = (unsigned long)(&__init_begin);
        for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(addr));
                set_page_count(virt_to_page(addr), 1);
                memset((void *)(addr & ~(PAGE_SIZE-1)), 0xcc, PAGE_SIZE);
                free_page(addr);
                totalram_pages++;
        }
        memset(__initdata_begin, 0xba, __initdata_end - __initdata_begin);
        printk("Freeing unused kernel memory: %luk freed\n",
               (&__init_end - &__init_begin) >> 10);
}

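/*
 * Return initrd pages to the page allocator once the image is no longer
 * needed. Ranges overlapping the kernel image are left alone.
 */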
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        if (start < (unsigned long)&_end)
                return;
        printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
        for (; start < end; start += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(start));
                set_page_count(virt_to_page(start), 1);
                free_page(start);
                totalram_pages++;
        }
}
#endif

void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
{
        /* Should check here against the e820 map to avoid double free */
#ifdef CONFIG_NUMA
        int nid = phys_to_nid(phys);
        reserve_bootmem_node(NODE_DATA(nid), phys, len);
#else
        reserve_bootmem(phys, len);
#endif
}

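/*
 * Check whether a kernel virtual address is backed by a present mapping,
 * walking the page tables by hand (used e.g. by /proc/kcore).
 */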
int kern_addr_valid(unsigned long addr)
{
        unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if (above != 0 && above != -1UL)
                return 0;

        pgd = pgd_offset_k(addr);
        if (pgd_none(*pgd))
                return 0;

        pud = pud_offset(pgd, addr);
        if (pud_none(*pud))
                return 0;

        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                return 0;
        if (pmd_large(*pmd))
                return pfn_valid(pmd_pfn(*pmd));

        pte = pte_offset_kernel(pmd, addr);
        if (pte_none(*pte))
                return 0;
        return pfn_valid(pte_pfn(*pte));
}

#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>

extern int exception_trace, page_fault_trace;

static ctl_table debug_table2[] = {
        { 99, "exception-trace", &exception_trace, sizeof(int), 0644, NULL,
          proc_dointvec },
#ifdef CONFIG_CHECKING
        { 100, "page-fault-trace", &page_fault_trace, sizeof(int), 0644, NULL,
          proc_dointvec },
#endif
        { 0, }
};

static ctl_table debug_root_table2[] = {
        { .ctl_name = CTL_DEBUG, .procname = "debug", .mode = 0555,
          .child = debug_table2 },
        { 0 },
};

static __init int x8664_sysctl_init(void)
{
        register_sysctl_table(debug_root_table2, 1);
        return 0;
}
__initcall(x8664_sysctl_init);
#endif

/* A pseudo VMA that allows ptrace access to the vsyscall page. This only
   covers the 64-bit vsyscall page now. 32-bit has a real VMA now and does
   not need special handling anymore. */

static struct vm_area_struct gate_vma = {
        .vm_start = VSYSCALL_START,
        .vm_end = VSYSCALL_END,
        .vm_page_prot = PAGE_READONLY
};

struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
#ifdef CONFIG_IA32_EMULATION
        if (test_tsk_thread_flag(tsk, TIF_IA32))
                return NULL;
#endif
        return &gate_vma;
}

int in_gate_area(struct task_struct *task, unsigned long addr)
{
        struct vm_area_struct *vma = get_gate_vma(task);

        if (!vma)
                return 0;
        return (addr >= vma->vm_start) && (addr < vma->vm_end);
}

/* Use this when you have no reliable task/vma, typically from interrupt
 * context. It is less reliable than using the task's vma and may give
 * false positives.
 */
int in_gate_area_no_task(unsigned long addr)
{
        return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
}