/*
 * Initialize MMU support.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>

#include <linux/bootmem.h>
#include <linux/efi.h>
#include <linux/elf.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/bitops.h>
#include <linux/kexec.h>

#include <asm/dma.h>
#include <asm/io.h>
#include <asm/machvec.h>
#include <asm/numa.h>
#include <asm/patch.h>
#include <asm/pgalloc.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/tlb.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/mca.h>
#include <asm/paravirt.h>

extern void ia64_tlb_init (void);

unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;

#ifdef CONFIG_VIRTUAL_MEM_MAP
unsigned long VMALLOC_END = VMALLOC_END_INIT;
EXPORT_SYMBOL(VMALLOC_END);
struct page *vmem_map;
EXPORT_SYMBOL(vmem_map);
#endif

struct page *zero_page_memmap_ptr;	/* map entry for zero page */
EXPORT_SYMBOL(zero_page_memmap_ptr);

void
__ia64_sync_icache_dcache (pte_t pte)
{
        unsigned long addr;
        struct page *page;

        page = pte_page(pte);
        addr = (unsigned long) page_address(page);

        if (test_bit(PG_arch_1, &page->flags))
                return;				/* i-cache is already coherent with d-cache */

        flush_icache_range(addr, addr + (PAGE_SIZE << compound_order(page)));
        set_bit(PG_arch_1, &page->flags);	/* mark page as clean */
}

/*
 * Since DMA is i-cache coherent, any (complete) pages that were written via
 * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
 * flush them when they get mapped into an executable vm-area.
 */
void
dma_mark_clean(void *addr, size_t size)
{
        unsigned long pg_addr, end;

        pg_addr = PAGE_ALIGN((unsigned long) addr);
        end = (unsigned long) addr + size;
        while (pg_addr + PAGE_SIZE <= end) {
                struct page *page = virt_to_page(pg_addr);
                set_bit(PG_arch_1, &page->flags);
                pg_addr += PAGE_SIZE;
        }
}

inline void
ia64_set_rbs_bot (void)
{
        unsigned long stack_size = rlimit_max(RLIMIT_STACK) & -16;

        if (stack_size > MAX_USER_STACK_SIZE)
                stack_size = MAX_USER_STACK_SIZE;
        current->thread.rbs_bot = PAGE_ALIGN(current->mm->start_stack - stack_size);
}
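
/*
 * Illustrative example (hypothetical numbers, assuming 16KB pages and a
 * stack limit well below MAX_USER_STACK_SIZE): with an RLIMIT_STACK hard
 * limit of 8MB (0x800000) and current->mm->start_stack == 0x4000000001000000:
 *
 *	stack_size = 0x800000 & -16 = 0x800000
 *	rbs_bot    = PAGE_ALIGN(0x4000000001000000 - 0x800000)
 *	           = 0x4000000000800000
 *
 * i.e. the bottom of the register backing store is placed, page-aligned,
 * 8MB below the initial user stack pointer.
 */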

/*
 * This performs some platform-dependent address space initialization.
 * On IA-64, we want to set up the VM area for the register backing
 * store (which grows upwards) and install the gateway page which is
 * used for signal trampolines, etc.
 */
void
ia64_init_addr_space (void)
{
        struct vm_area_struct *vma;

        ia64_set_rbs_bot();

        /*
         * If we're out of memory and kmem_cache_alloc() returns NULL, we simply ignore
         * the problem.  When the process attempts to write to the register backing store
         * for the first time, it will get a SEGFAULT in this case.
         */
        vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
        if (vma) {
                INIT_LIST_HEAD(&vma->anon_vma_chain);
                vma->vm_mm = current->mm;
                vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
                vma->vm_end = vma->vm_start + PAGE_SIZE;
                vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
                vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
                down_write(&current->mm->mmap_sem);
                if (insert_vm_struct(current->mm, vma)) {
                        up_write(&current->mm->mmap_sem);
                        kmem_cache_free(vm_area_cachep, vma);
                        return;
                }
                up_write(&current->mm->mmap_sem);
        }

        /* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
        if (!(current->personality & MMAP_PAGE_ZERO)) {
                vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
                if (vma) {
                        INIT_LIST_HEAD(&vma->anon_vma_chain);
                        vma->vm_mm = current->mm;
                        vma->vm_end = PAGE_SIZE;
                        vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
                        vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO |
                                        VM_DONTEXPAND | VM_DONTDUMP;
                        down_write(&current->mm->mmap_sem);
                        if (insert_vm_struct(current->mm, vma)) {
                                up_write(&current->mm->mmap_sem);
                                kmem_cache_free(vm_area_cachep, vma);
                                return;
                        }
                        up_write(&current->mm->mmap_sem);
                }
        }
}

void
free_initmem (void)
{
        free_reserved_area(ia64_imva(__init_begin), ia64_imva(__init_end),
                           -1, "unused kernel");
}

void __init
free_initrd_mem (unsigned long start, unsigned long end)
{
        /*
         * EFI uses 4KB pages while the kernel can use 4KB or bigger.
         * Thus EFI and the kernel may have different page sizes. It is
         * therefore possible to have the initrd share the same page as
         * the end of the kernel (given current setup).
         *
         * To avoid freeing/using the wrong page (kernel sized) we:
         *	- align up the beginning of initrd
         *	- align down the end of initrd
         *
         *  |             |
         *  |=============| a000
         *  |             |
         *  |             |
         *  |             | 9000
         *  |/////////////|
         *  |/////////////|
         *  |=============| 8000
         *  |///INITRD////|
         *  |/////////////|
         *  |/////////////| 7000
         *  |             |
         *  |KKKKKKKKKKKKK|
         *  |=============| 6000
         *  |KKKKKKKKKKKKK|
         *  |KKKKKKKKKKKKK|
         *  K=kernel using 8KB pages
         *
         * In this example, we must free page 8000 ONLY. So we must align up
         * initrd_start and keep initrd_end as is.
         */
        start = PAGE_ALIGN(start);
        end = end & PAGE_MASK;
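
        /*
         * Illustrative example (hypothetical offsets, assuming 16KB kernel
         * pages, i.e. page boundaries at 0x4000, 0x8000, 0xc000, ...): for an
         * initrd spanning [0x4200, 0xd600), start rounds up to 0x8000 and end
         * rounds down to 0xc000, so only the page at 0x8000 (the only page
         * lying entirely inside the initrd) is freed; the partially shared
         * pages at either end are left alone.
         */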

        if (start < end)
                printk(KERN_INFO "Freeing initrd memory: %ldkB freed\n", (end - start) >> 10);

        for (; start < end; start += PAGE_SIZE) {
                if (!virt_addr_valid(start))
                        continue;
                free_reserved_page(virt_to_page(start));
        }
}

/*
 * This installs a clean page in the kernel's page table.
 */
static struct page * __init
put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if (!PageReserved(page))
                printk(KERN_ERR "put_kernel_page: page at 0x%p not in reserved memory\n",
                       page_address(page));

        pgd = pgd_offset_k(address);		/* note: this is NOT pgd_offset()! */

        {
                pud = pud_alloc(&init_mm, pgd, address);
                if (!pud)
                        goto out;
                pmd = pmd_alloc(&init_mm, pud, address);
                if (!pmd)
                        goto out;
                pte = pte_alloc_kernel(pmd, address);
                if (!pte)
                        goto out;
                if (!pte_none(*pte))
                        goto out;
                set_pte(pte, mk_pte(page, pgprot));
        }
  out:
        /* no need for flush_tlb */
        return page;
}

static void __init
setup_gate (void)
{
        void *gate_section;
        struct page *page;

        /*
         * Map the gate page twice: once read-only to export the ELF
         * headers etc. and once as an execute-only page to enable
         * privilege-promotion via "epc":
         */
        gate_section = paravirt_get_gate_section();
        page = virt_to_page(ia64_imva(gate_section));
        put_kernel_page(page, GATE_ADDR, PAGE_READONLY);
#ifdef HAVE_BUGGY_SEGREL
        page = virt_to_page(ia64_imva(gate_section + PAGE_SIZE));
        put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE);
#else
        put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE);
        /* Fill in the holes (if any) with read-only zero pages: */
        {
                unsigned long addr;

                for (addr = GATE_ADDR + PAGE_SIZE;
                     addr < GATE_ADDR + PERCPU_PAGE_SIZE;
                     addr += PAGE_SIZE)
                {
                        put_kernel_page(ZERO_PAGE(0), addr,
                                        PAGE_READONLY);
                        put_kernel_page(ZERO_PAGE(0), addr + PERCPU_PAGE_SIZE,
                                        PAGE_READONLY);
                }
        }
#endif
        ia64_patch_gate();
}

void ia64_mmu_init(void *my_cpu_data)
{
        unsigned long pta, impl_va_bits;
        extern void tlb_init(void);

#ifdef CONFIG_DISABLE_VHPT
#	define VHPT_ENABLE_BIT	0
#else
#	define VHPT_ENABLE_BIT	1
#endif

        /*
         * Check if the virtually mapped linear page table (VMLPT) overlaps with a mapped
         * address space.  The IA-64 architecture guarantees that at least 50 bits of
         * virtual address space are implemented but if we pick a large enough page size
         * (e.g., 64KB), the mapped address space is big enough that it will overlap with
         * VMLPT.  I assume that once we run on machines big enough to warrant 64KB pages,
         * IMPL_VA_MSB will be significantly bigger, so this is unlikely to become a
         * problem in practice.  Alternatively, we could truncate the top of the mapped
         * address space to not permit mappings that would overlap with the VMLPT.
         * --davidm 00/12/06
         */
#	define pte_bits			3
#	define mapped_space_bits	(3*(PAGE_SHIFT - pte_bits) + PAGE_SHIFT)
        /*
         * The virtual page table has to cover the entire implemented address space within
         * a region even though not all of this space may be mappable.  The reason for
         * this is that the Access bit and Dirty bit fault handlers perform
         * non-speculative accesses to the virtual page table, so the address range of the
         * virtual page table itself needs to be covered by the virtual page table.
         */
#	define vmlpt_bits		(impl_va_bits - PAGE_SHIFT + pte_bits)
#	define POW2(n)			(1ULL << (n))
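
        /*
         * Illustrative arithmetic (hypothetical configuration, not computed
         * by this file): with 16KB pages (PAGE_SHIFT = 14) and 8-byte PTEs
         * (pte_bits = 3):
         *
         *	mapped_space_bits = 3*(14 - 3) + 14 = 47
         *		(a 3-level page table can map 2^47 bytes per region)
         *
         * and, if the CPU implements 51 virtual address bits per region
         * (IMPL_VA_MSB = 50, so impl_va_bits = 51 below):
         *
         *	vmlpt_bits = 51 - 14 + 3 = 40
         *		(the VMLPT needs 2^40 bytes of virtual space)
         */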

        impl_va_bits = ffz(~(local_cpu_data->unimpl_va_mask | (7UL << 61)));

        if (impl_va_bits < 51 || impl_va_bits > 61)
                panic("CPU has bogus IMPL_VA_MSB value of %lu!\n", impl_va_bits - 1);
        /*
         * mapped_space_bits - PAGE_SHIFT is the total number of ptes we need,
         * which must fit into "vmlpt_bits - pte_bits" slots. Second half of
         * the test makes sure that our mapped space doesn't overlap the
         * unimplemented hole in the middle of the region.
         */
        if ((mapped_space_bits - PAGE_SHIFT > vmlpt_bits - pte_bits) ||
            (mapped_space_bits > impl_va_bits - 1))
                panic("Cannot build a big enough virtual-linear page table"
                      " to cover mapped address space.\n"
                      " Try using a smaller page size.\n");

        /* place the VMLPT at the end of each page-table mapped region: */
        pta = POW2(61) - POW2(vmlpt_bits);

        /*
         * Set the (virtually mapped linear) page table address.  Bit
         * 8 selects between the short and long format, bits 2-7 the
         * size of the table, and bit 0 whether the VHPT walker is
         * enabled.
         */
        ia64_set_pta(pta | (0 << 8) | (vmlpt_bits << 2) | VHPT_ENABLE_BIT);
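
        /*
         * Continuing the illustrative numbers above (vmlpt_bits = 40, VHPT
         * walker enabled): pta = 2^61 - 2^40 = 0x1fffff0000000000, so the
         * value programmed into the PTA register would be
         *
         *	0x1fffff0000000000 | (0 << 8) | (40 << 2) | 1 = 0x1fffff00000000a1
         *
         * i.e. base at the top 2^40 bytes of the region, size field = 40,
         * bit 8 clear (short format), and the VHPT-enable bit set.
         */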

        ia64_tlb_init();

#ifdef	CONFIG_HUGETLB_PAGE
        ia64_set_rr(HPAGE_REGION_BASE, HPAGE_SHIFT << 2);
        ia64_srlz_d();
#endif
}

#ifdef CONFIG_VIRTUAL_MEM_MAP
int vmemmap_find_next_valid_pfn(int node, int i)
{
        unsigned long end_address, hole_next_pfn;
        unsigned long stop_address;
        pg_data_t *pgdat = NODE_DATA(node);

        end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
        end_address = PAGE_ALIGN(end_address);
        stop_address = (unsigned long) &vmem_map[
                pgdat->node_start_pfn + pgdat->node_spanned_pages];

        do {
                pgd_t *pgd;
                pud_t *pud;
                pmd_t *pmd;
                pte_t *pte;

                pgd = pgd_offset_k(end_address);
                if (pgd_none(*pgd)) {
                        end_address += PGDIR_SIZE;
                        continue;
                }

                pud = pud_offset(pgd, end_address);
                if (pud_none(*pud)) {
                        end_address += PUD_SIZE;
                        continue;
                }

                pmd = pmd_offset(pud, end_address);
                if (pmd_none(*pmd)) {
                        end_address += PMD_SIZE;
                        continue;
                }

                pte = pte_offset_kernel(pmd, end_address);
retry_pte:
                if (pte_none(*pte)) {
                        end_address += PAGE_SIZE;
                        pte++;
                        if ((end_address < stop_address) &&
                            (end_address != ALIGN(end_address, 1UL << PMD_SHIFT)))
                                goto retry_pte;
                        continue;
                }
                /* Found next valid vmem_map page */
                break;
        } while (end_address < stop_address);

        end_address = min(end_address, stop_address);
        end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1;
        hole_next_pfn = end_address / sizeof(struct page);
        return hole_next_pfn - pgdat->node_start_pfn;
}
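
/*
 * Note on the conversion at the end of the function above: the byte offset
 * into vmem_map is rounded up by adding sizeof(struct page) - 1 before
 * dividing, so hole_next_pfn is the first pfn whose struct page starts at or
 * after end_address. For example (hypothetical size), with a 64-byte
 * struct page an offset of 6401 bytes maps to entry (6401 + 63) / 64 = 101.
 */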

int __init create_mem_map_page_table(u64 start, u64 end, void *arg)
{
        unsigned long address, start_page, end_page;
        struct page *map_start, *map_end;
        int node;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
        map_end   = vmem_map + (__pa(end) >> PAGE_SHIFT);

        start_page = (unsigned long) map_start & PAGE_MASK;
        end_page = PAGE_ALIGN((unsigned long) map_end);
        node = paddr_to_nid(__pa(start));

        for (address = start_page; address < end_page; address += PAGE_SIZE) {
                pgd = pgd_offset_k(address);
                if (pgd_none(*pgd))
                        pgd_populate(&init_mm, pgd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
                pud = pud_offset(pgd, address);

                if (pud_none(*pud))
                        pud_populate(&init_mm, pud, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
                pmd = pmd_offset(pud, address);

                if (pmd_none(*pmd))
                        pmd_populate_kernel(&init_mm, pmd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
                pte = pte_offset_kernel(pmd, address);

                if (pte_none(*pte))
                        set_pte(pte, pfn_pte(__pa(alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE)) >> PAGE_SHIFT,
                                             PAGE_KERNEL));
        }
        return 0;
}

struct memmap_init_callback_data {
        struct page *start;
        struct page *end;
        int nid;
        unsigned long zone;
};

static int __meminit
virtual_memmap_init(u64 start, u64 end, void *arg)
{
        struct memmap_init_callback_data *args;
        struct page *map_start, *map_end;

        args = (struct memmap_init_callback_data *) arg;
        map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
        map_end   = vmem_map + (__pa(end) >> PAGE_SHIFT);

        if (map_start < args->start)
                map_start = args->start;
        if (map_end > args->end)
                map_end = args->end;

        /*
         * We have to initialize "out of bounds" struct page elements that fit completely
         * on the same pages that were allocated for the "in bounds" elements because they
         * may be referenced later (and found to be "reserved").
         */
        map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1)) / sizeof(struct page);
        map_end += ((PAGE_ALIGN((unsigned long) map_end) - (unsigned long) map_end)
                    / sizeof(struct page));

        if (map_start < map_end)
                memmap_init_zone((unsigned long)(map_end - map_start),
                                 args->nid, args->zone, page_to_pfn(map_start),
                                 MEMMAP_EARLY);
        return 0;
}

void __meminit
memmap_init (unsigned long size, int nid, unsigned long zone,
             unsigned long start_pfn)
{
        if (!vmem_map)
                memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY);
        else {
                struct page *start;
                struct memmap_init_callback_data args;

                start = pfn_to_page(start_pfn);
                args.start = start;
                args.end = start + size;
                args.nid = nid;
                args.zone = zone;

                efi_memmap_walk(virtual_memmap_init, &args);
        }
}
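
/*
 * Descriptive note: ia64_pfn_valid() below probes the virtually mapped
 * vmem_map entry for a pfn with __get_user() to find out whether the
 * corresponding struct page is actually backed by memory (holes in the
 * virtual memmap are not). Since sizeof(struct page) need not divide
 * PAGE_SIZE evenly, the entry may straddle a page boundary, in which case
 * the last byte is probed as well.
 */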
int
ia64_pfn_valid (unsigned long pfn)
{
        char byte;
        struct page *pg = pfn_to_page(pfn);

        return     (__get_user(byte, (char __user *) pg) == 0)
                && ((((u64)pg & PAGE_MASK) == (((u64)(pg + 1) - 1) & PAGE_MASK))
                        || (__get_user(byte, (char __user *) (pg + 1) - 1) == 0));
}
EXPORT_SYMBOL(ia64_pfn_valid);

int __init find_largest_hole(u64 start, u64 end, void *arg)
{
        u64 *max_gap = arg;

        static u64 last_end = PAGE_OFFSET;

        /* NOTE: this algorithm assumes efi memmap table is ordered */

        if (*max_gap < (start - last_end))
                *max_gap = start - last_end;
        last_end = end;
        return 0;
}

#endif /* CONFIG_VIRTUAL_MEM_MAP */

int __init register_active_ranges(u64 start, u64 len, int nid)
{
        u64 end = start + len;

#ifdef CONFIG_KEXEC
        if (start > crashk_res.start && start < crashk_res.end)
                start = crashk_res.end;
        if (end > crashk_res.start && end < crashk_res.end)
                end = crashk_res.start;
#endif

        if (start < end)
                memblock_add_node(__pa(start), end - start, nid);
        return 0;
}

int
find_max_min_low_pfn (u64 start, u64 end, void *arg)
{
        unsigned long pfn_start, pfn_end;
#ifdef CONFIG_FLATMEM
        pfn_start = (PAGE_ALIGN(__pa(start))) >> PAGE_SHIFT;
        pfn_end = (PAGE_ALIGN(__pa(end - 1))) >> PAGE_SHIFT;
#else
        pfn_start = GRANULEROUNDDOWN(__pa(start)) >> PAGE_SHIFT;
        pfn_end = GRANULEROUNDUP(__pa(end - 1)) >> PAGE_SHIFT;
#endif
        min_low_pfn = min(min_low_pfn, pfn_start);
        max_low_pfn = max(max_low_pfn, pfn_end);
        return 0;
}

/*
 * Boot command-line option "nolwsys" can be used to disable the use of any light-weight
 * system call handler.  When this option is in effect, all fsyscalls will end up bubbling
 * down into the kernel and calling the normal (heavy-weight) syscall handler.  This is
 * useful for performance testing, but conceivably could also come in handy for debugging
 * purposes.
 */

static int nolwsys __initdata;

static int __init
nolwsys_setup (char *s)
{
        nolwsys = 1;
        return 1;
}

__setup("nolwsys", nolwsys_setup);

void __init
mem_init (void)
{
        int i;

        BUG_ON(PTRS_PER_PGD * sizeof(pgd_t) != PAGE_SIZE);
        BUG_ON(PTRS_PER_PMD * sizeof(pmd_t) != PAGE_SIZE);
        BUG_ON(PTRS_PER_PTE * sizeof(pte_t) != PAGE_SIZE);

#ifdef CONFIG_PCI
        /*
         * This needs to be called _after_ the command line has been parsed but _before_
         * any drivers that may need the PCI DMA interface are initialized or bootmem has
         * been freed.
         */
        platform_dma_init();
#endif

#ifdef CONFIG_FLATMEM
        BUG_ON(!mem_map);
#endif

        set_max_mapnr(max_low_pfn);
        high_memory = __va(max_low_pfn * PAGE_SIZE);
        free_all_bootmem();
        mem_init_print_info(NULL);

        /*
         * For fsyscall entry points with no light-weight handler, use the ordinary
         * (heavy-weight) handler, but mark it by setting bit 0, so the fsyscall entry
         * code can tell them apart.
         */
        for (i = 0; i < NR_syscalls; ++i) {
                extern unsigned long sys_call_table[NR_syscalls];
                unsigned long *fsyscall_table = paravirt_get_fsyscall_table();

                if (!fsyscall_table[i] || nolwsys)
                        fsyscall_table[i] = sys_call_table[i] | 1;
        }
        setup_gate();
}

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size)
{
        pg_data_t *pgdat;
        struct zone *zone;
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
        int ret;

        pgdat = NODE_DATA(nid);

        zone = pgdat->node_zones + ZONE_NORMAL;
        ret = __add_pages(nid, zone, start_pfn, nr_pages);

        if (ret)
                printk("%s: Problem encountered in __add_pages() as ret=%d\n",
                       __func__, ret);

        return ret;
}

#ifdef CONFIG_MEMORY_HOTREMOVE
int arch_remove_memory(u64 start, u64 size)
{
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
        struct zone *zone;
        int ret;

        zone = page_zone(pfn_to_page(start_pfn));
        ret = __remove_pages(zone, start_pfn, nr_pages);
        if (ret)
                pr_warn("%s: Problem encountered in __remove_pages() as"
                        " ret=%d\n", __func__, ret);

        return ret;
}
#endif
#endif

/*
 * Even when CONFIG_IA32_SUPPORT is not enabled it is
 * useful to have the Linux/x86 domain registered to
 * avoid an attempted module load when emulators call
 * personality(PER_LINUX32). This saves several milliseconds
 * on each such call.
 */
static struct exec_domain ia32_exec_domain;

static int __init
per_linux32_init(void)
{
        ia32_exec_domain.name = "Linux/x86";
        ia32_exec_domain.handler = NULL;
        ia32_exec_domain.pers_low = PER_LINUX32;
        ia32_exec_domain.pers_high = PER_LINUX32;
        ia32_exec_domain.signal_map = default_exec_domain.signal_map;
        ia32_exec_domain.signal_invmap = default_exec_domain.signal_invmap;
        register_exec_domain(&ia32_exec_domain);

        return 0;
}

__initcall(per_linux32_init);