/*
 * linux/arch/i386/mm/init.c
 *
 * Copyright (C) 1995 Linus Torvalds
 *
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/memory_hotplug.h>
#include <linux/initrd.h>
#include <linux/cpumask.h>

#include <asm/asm.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/bugs.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/paravirt.h>
#include <asm/setup.h>
#include <asm/cacheflush.h>
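
/*
 * Size of the chunk of kernel address space reserved for vmalloc:
 * 128 << 20 bytes == 128 MB.
 */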
unsigned int __VMALLOC_RESERVE = 128 << 20;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
unsigned long highstart_pfn, highend_pfn;

static noinline int do_test_wp_bit(void);

/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. This only returns the pgd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
	pud_t *pud;
	pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
	if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
		pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);

		paravirt_alloc_pd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
		set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
		pud = pud_offset(pgd, 0);
		BUG_ON(pmd_table != pmd_offset(pud, 0));
	}
#endif
	pud = pud_offset(pgd, 0);
	pmd_table = pmd_offset(pud, 0);

	return pmd_table;
}

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry:
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
		pte_t *page_table = NULL;

#ifdef CONFIG_DEBUG_PAGEALLOC
		page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
#endif
		if (!page_table) {
			page_table =
				(pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
		}

		paravirt_alloc_pt(&init_mm, __pa(page_table) >> PAGE_SHIFT);
		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
		BUG_ON(page_table != pte_offset_kernel(pmd, 0));
	}

	return pte_offset_kernel(pmd, 0);
}

/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, wherever page tables are missing in
 * the given range.
 *
 * NOTE: The pagetables are allocated contiguously in physical memory,
 * so we can cache the place of the first one and move around without
 * checking the pgd every time.
 */
static void __init
page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
{
	int pgd_idx, pmd_idx;
	unsigned long vaddr;
	pgd_t *pgd;
	pmd_t *pmd;

	vaddr = start;
	pgd_idx = pgd_index(vaddr);
	pmd_idx = pmd_index(vaddr);
	pgd = pgd_base + pgd_idx;

	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);
		pmd = pmd + pmd_index(vaddr);
		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
		     pmd++, pmd_idx++) {
			one_page_table_init(pmd);

			vaddr += PMD_SIZE;
		}
		pmd_idx = 0;
	}
}
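
/*
 * Everything from PAGE_OFFSET up to the end of the kernel's init
 * sections is treated as kernel text, so kernel_physical_mapping_init()
 * below keeps execute permission on those mappings:
 */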
static inline int is_kernel_text(unsigned long addr)
{
	if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
		return 1;
	return 0;
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET:
 */
static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
{
	int pgd_idx, pmd_idx, pte_ofs;
	unsigned long pfn;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;

	pgd_idx = pgd_index(PAGE_OFFSET);
	pgd = pgd_base + pgd_idx;
	pfn = 0;

	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);
		if (pfn >= max_low_pfn)
			continue;

		for (pmd_idx = 0;
		     pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn;
		     pmd++, pmd_idx++) {
			unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;

			/*
			 * Map with big pages if possible, otherwise
			 * create normal page tables:
			 */
			if (cpu_has_pse) {
				unsigned int addr2;
				pgprot_t prot = PAGE_KERNEL_LARGE;

				addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
					PAGE_OFFSET + PAGE_SIZE-1;

				if (is_kernel_text(addr) ||
				    is_kernel_text(addr2))
					prot = PAGE_KERNEL_LARGE_EXEC;

				set_pmd(pmd, pfn_pmd(pfn, prot));

				pfn += PTRS_PER_PTE;
				continue;
			}
			pte = one_page_table_init(pmd);

			for (pte_ofs = 0;
			     pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn;
			     pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
				pgprot_t prot = PAGE_KERNEL;

				if (is_kernel_text(addr))
					prot = PAGE_KERNEL_EXEC;

				set_pte(pte, pfn_pte(pfn, prot));
			}
		}
	}
}
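
/*
 * Pentium Pro Erratum #50: on affected steppings, pages in the physical
 * range 0x70000000 - 0x7003ffff (pfns 0x70000 - 0x7003f) must not be
 * used as RAM; see ppro_with_ram_bug():
 */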
static inline int page_kills_ppro(unsigned long pagenr)
{
	if (pagenr >= 0x70000 && pagenr <= 0x7003F)
		return 1;
	return 0;
}

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;
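
/*
 * Walk the kernel page tables (pgd -> pud -> pmd -> pte) to find the
 * pte that maps the given fixmap virtual address:
 */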
static inline pte_t *kmap_get_fixmap_pte(unsigned long vaddr)
{
	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
			vaddr), vaddr), vaddr);
}

static void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/*
	 * Cache the first kmap pte:
	 */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

	kmap_prot = PAGE_KERNEL;
}

static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
	unsigned long vaddr;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	vaddr = PKMAP_BASE;
	page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

	pgd = swapper_pg_dir + pgd_index(vaddr);
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	pte = pte_offset_kernel(pmd, vaddr);
	pkmap_page_table = pte;
}
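
/* Hand a new highmem page to the buddy allocator and account for it: */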
static void __meminit free_new_highpage(struct page *page)
{
	init_page_count(page);
	__free_page(page);
	totalhigh_pages++;
}

void __init add_one_highpage_init(struct page *page, int pfn, int bad_ppro)
{
	if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
		ClearPageReserved(page);
		free_new_highpage(page);
	} else
		SetPageReserved(page);
}

static int __meminit
add_one_highpage_hotplug(struct page *page, unsigned long pfn)
{
	free_new_highpage(page);
	totalram_pages++;
#ifdef CONFIG_FLATMEM
	max_mapnr = max(pfn, max_mapnr);
#endif
	num_physpages++;

	return 0;
}

/*
 * Not currently handling the NUMA case. We assume a single node and
 * that all memory which has been added dynamically and is onlined
 * here is in HIGHMEM.
 */
void __meminit online_page(struct page *page)
{
	ClearPageReserved(page);
	add_one_highpage_hotplug(page, page_to_pfn(page));
}

#ifndef CONFIG_NUMA
static void __init set_highmem_pages_init(int bad_ppro)
{
	int pfn;

	for (pfn = highstart_pfn; pfn < highend_pfn; pfn++) {
		/*
		 * Holes under sparsemem might not have a mem_map[]:
		 */
		if (pfn_valid(pfn))
			add_one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
	}
	totalram_pages += totalhigh_pages;
}
#endif /* !CONFIG_NUMA */

#else
# define kmap_init()				do { } while (0)
# define permanent_kmaps_init(pgd_base)		do { } while (0)
# define set_highmem_pages_init(bad_ppro)	do { } while (0)
#endif /* CONFIG_HIGHMEM */

pteval_t __PAGE_KERNEL = _PAGE_KERNEL;
EXPORT_SYMBOL(__PAGE_KERNEL);

pteval_t __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;

void __init native_pagetable_setup_start(pgd_t *base)
{
	unsigned long pfn, va;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	/*
	 * Remove any mappings which extend past the end of physical
	 * memory from the boot time page table:
	 */
	for (pfn = max_low_pfn + 1; pfn < 1<<(32-PAGE_SHIFT); pfn++) {
		va = PAGE_OFFSET + (pfn<<PAGE_SHIFT);
		pgd = base + pgd_index(va);
		if (!pgd_present(*pgd))
			break;

		pud = pud_offset(pgd, va);
		pmd = pmd_offset(pud, va);
		if (!pmd_present(*pmd))
			break;

		pte = pte_offset_kernel(pmd, va);
		if (!pte_present(*pte))
			break;

		pte_clear(NULL, va, pte);
	}
	paravirt_alloc_pd(&init_mm, __pa(base) >> PAGE_SHIFT);
}
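
/* Hook for paravirt; on native hardware there is nothing left to do: */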
void __init native_pagetable_setup_done(pgd_t *base)
{
}

/*
 * Build a proper pagetable for the kernel mappings. Up until this
 * point, we've been running on some set of pagetables constructed by
 * the boot process.
 *
 * If we're booting on native hardware, this will be a pagetable
 * constructed in arch/x86/kernel/head_32.S. The root of the
 * pagetable will be swapper_pg_dir.
 *
 * If we're booting paravirtualized under a hypervisor, then there are
 * more options: we may already be running PAE, and the pagetable may
 * or may not be based in swapper_pg_dir. In any case,
 * paravirt_pagetable_setup_start() will set up swapper_pg_dir
 * appropriately for the rest of the initialization to work.
 *
 * In general, pagetable_init() assumes that the pagetable may already
 * be partially populated, and so it avoids stomping on any existing
 * mappings.
 */
static void __init pagetable_init(void)
{
	pgd_t *pgd_base = swapper_pg_dir;
	unsigned long vaddr, end;

	paravirt_pagetable_setup_start(pgd_base);

	/* Enable PSE if available */
	if (cpu_has_pse)
		set_in_cr4(X86_CR4_PSE);

	/* Enable PGE if available */
	if (cpu_has_pge) {
		set_in_cr4(X86_CR4_PGE);
		__PAGE_KERNEL |= _PAGE_GLOBAL;
		__PAGE_KERNEL_EXEC |= _PAGE_GLOBAL;
	}

	kernel_physical_mapping_init(pgd_base);
	remap_numa_kva();

	/*
	 * Fixed mappings, only the page table structure has to be
	 * created - mappings will be set by set_fixmap():
	 */
	early_ioremap_clear();
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, pgd_base);
	early_ioremap_reset();

	permanent_kmaps_init(pgd_base);

	paravirt_pagetable_setup_done(pgd_base);
}

#ifdef CONFIG_ACPI_SLEEP
/*
 * ACPI suspend needs this for resume, because things like the intel-agp
 * driver might have split up a kernel 4MB mapping.
 */
char swsusp_pg_dir[PAGE_SIZE]
	__attribute__ ((aligned(PAGE_SIZE)));

static inline void save_pg_dir(void)
{
	memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
}
#else /* !CONFIG_ACPI_SLEEP */
static inline void save_pg_dir(void)
{
}
#endif /* !CONFIG_ACPI_SLEEP */
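
/*
 * Clear the boot-time low (identity) mappings of RAM from
 * swapper_pg_dir; save_pg_dir() snapshots the directory first,
 * since ACPI resume needs the pre-zap copy (see above).
 */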
void zap_low_mappings(void)
{
	int i;

	save_pg_dir();

	/*
	 * Zap initial low-memory mappings.
	 *
	 * Note that "pgd_clear()" doesn't do it for
	 * us, because pgd_clear() is a no-op on i386.
	 */
	for (i = 0; i < USER_PTRS_PER_PGD; i++) {
#ifdef CONFIG_X86_PAE
		set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
#else
		set_pgd(swapper_pg_dir+i, __pgd(0));
#endif
	}
	flush_tlb_all();
}

int nx_enabled;

pteval_t __supported_pte_mask __read_mostly = ~_PAGE_NX;
EXPORT_SYMBOL_GPL(__supported_pte_mask);

#ifdef CONFIG_X86_PAE

static int disable_nx __initdata;

/*
 * noexec = on|off
 *
 * Control non-executable mappings.
 *
 * on      Enable
 * off     Disable
 */
static int __init noexec_setup(char *str)
{
	if (!str || !strcmp(str, "on")) {
		if (cpu_has_nx) {
			__supported_pte_mask |= _PAGE_NX;
			disable_nx = 0;
		}
	} else {
		if (!strcmp(str, "off")) {
			disable_nx = 1;
			__supported_pte_mask &= ~_PAGE_NX;
		} else {
			return -EINVAL;
		}
	}

	return 0;
}
early_param("noexec", noexec_setup);
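
/*
 * Enable the NX feature if the CPU supports it (CPUID 0x80000001,
 * EDX bit 20) and "noexec=off" was not given on the command line:
 */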
static void __init set_nx(void)
{
	unsigned int v[4], l, h;

	if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
		cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);

		if ((v[3] & (1 << 20)) && !disable_nx) {
			rdmsr(MSR_EFER, l, h);
			l |= EFER_NX;
			wrmsr(MSR_EFER, l, h);
			nx_enabled = 1;
			__supported_pte_mask |= _PAGE_NX;
		}
	}
}
#endif

/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
#ifdef CONFIG_X86_PAE
	set_nx();
	if (nx_enabled)
		printk(KERN_INFO "NX (Execute Disable) protection: active\n");
#endif
	pagetable_init();

	load_cr3(swapper_pg_dir);

	__flush_tlb_all();

	kmap_init();
}

/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386s
 * and also on some strange 486s (NexGen etc.). All 586+ CPUs are OK. This
 * used to involve black magic jumps to work around some nasty CPU bugs,
 * but fortunately the switch to using exceptions got rid of all that.
 */
static void __init test_wp_bit(void)
{
	printk(KERN_INFO
  "Checking if this processor honours the WP bit even in supervisor mode...");

	/* Any page-aligned address will do, the test is non-destructive */
	__set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
	boot_cpu_data.wp_works_ok = do_test_wp_bit();
	clear_fixmap(FIX_WP_TEST);

	if (!boot_cpu_data.wp_works_ok) {
		printk(KERN_CONT "No.\n");
#ifdef CONFIG_X86_WP_WORKS_OK
		panic(
  "This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
#endif
	} else {
		printk(KERN_CONT "Ok.\n");
	}
}

static struct kcore_list kcore_mem, kcore_vmalloc;

void __init mem_init(void)
{
	int codesize, reservedpages, datasize, initsize;
	int tmp, bad_ppro;

#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif
	bad_ppro = ppro_with_ram_bug();

#ifdef CONFIG_HIGHMEM
	/* check that fixmap and pkmap do not overlap */
	if (PKMAP_BASE + LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
		printk(KERN_ERR
			"fixmap and kmap areas overlap - this will crash\n");
		printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
				PKMAP_BASE, PKMAP_BASE + LAST_PKMAP*PAGE_SIZE,
				FIXADDR_START);
		BUG();
	}
#endif
	/* this will put all low memory onto the freelists */
	totalram_pages += free_all_bootmem();

	reservedpages = 0;
	for (tmp = 0; tmp < max_low_pfn; tmp++)
		/*
		 * Only count reserved RAM pages:
		 */
		if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
			reservedpages++;

	set_highmem_pages_init(bad_ppro);

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END-VMALLOC_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
			"%dk reserved, %dk data, %dk init, %ldk highmem)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10,
		(unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
	       );

#if 1 /* double-sanity-check paranoia */
	printk(KERN_INFO "virtual kernel memory layout:\n"
		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
		FIXADDR_START, FIXADDR_TOP,
		(FIXADDR_TOP - FIXADDR_START) >> 10,

#ifdef CONFIG_HIGHMEM
		PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
		(LAST_PKMAP*PAGE_SIZE) >> 10,
#endif

		VMALLOC_START, VMALLOC_END,
		(VMALLOC_END - VMALLOC_START) >> 20,

		(unsigned long)__va(0), (unsigned long)high_memory,
		((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,

		(unsigned long)&__init_begin, (unsigned long)&__init_end,
		((unsigned long)&__init_end -
		 (unsigned long)&__init_begin) >> 10,

		(unsigned long)&_etext, (unsigned long)&_edata,
		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

		(unsigned long)&_text, (unsigned long)&_etext,
		((unsigned long)&_etext - (unsigned long)&_text) >> 10);

#ifdef CONFIG_HIGHMEM
	BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
	BUG_ON(VMALLOC_END > PKMAP_BASE);
#endif
	BUG_ON(VMALLOC_START > VMALLOC_END);
	BUG_ON((unsigned long)high_memory > VMALLOC_START);
#endif /* double-sanity-check paranoia */

	if (boot_cpu_data.wp_works_ok < 0)
		test_wp_bit();

	cpa_init();

	/*
	 * Subtle. SMP is doing its boot stuff late (because it has to
	 * fork idle threads) - but it also needs low mappings for the
	 * protected-mode entry to work. We zap these entries only after
	 * the WP-bit has been tested.
	 */
#ifndef CONFIG_SMP
	zap_low_mappings();
#endif
}

#ifdef CONFIG_MEMORY_HOTPLUG
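/*
 * Memory hotplug: newly added memory is put into ZONE_HIGHMEM here
 * (see also online_page() above):
 */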
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdata = NODE_DATA(nid);
	struct zone *zone = pgdata->node_zones + ZONE_HIGHMEM;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	return __add_pages(zone, start_pfn, nr_pages);
}
#endif

/*
 * This function cannot be __init, since exceptions don't work in that
 * section. Put this after the callers, so that it cannot be inlined.
 */
static noinline int do_test_wp_bit(void)
{
	char tmp_reg;
	int flag;
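
	/*
	 * Read a byte from the read-only FIX_WP_TEST page and write it
	 * back at label 1. If the CPU honours WP in supervisor mode, the
	 * write faults and the exception fixup resumes at label 2,
	 * skipping the xorl, so flag keeps its initial value of 1. If WP
	 * is broken, the write succeeds and the xorl clears flag to 0.
	 */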
	__asm__ __volatile__(
		"	movb %0, %1	\n"
		"1:	movb %1, %0	\n"
		"	xorl %2, %2	\n"
		"2:			\n"
		_ASM_EXTABLE(1b,2b)
		:"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
		 "=q" (tmp_reg),
		 "=r" (flag)
		:"2" (1)
		:"memory");

	return flag;
}

#ifdef CONFIG_DEBUG_RODATA
const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);

void mark_rodata_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long size = PFN_ALIGN(_etext) - start;

#ifndef CONFIG_KPROBES
#ifdef CONFIG_HOTPLUG_CPU
	/* It must still be possible to apply SMP alternatives. */
	if (num_possible_cpus() <= 1)
#endif
	{
		set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
		printk(KERN_INFO "Write protecting the kernel text: %luk\n",
			size >> 10);

#ifdef CONFIG_CPA_DEBUG
		printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
			start, start+size);
		set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT);

		printk(KERN_INFO "Testing CPA: write protecting again\n");
		set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
#endif
	}
#endif
	start += size;
	size = (unsigned long)__end_rodata - start;
	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
		size >> 10);
	rodata_test();

#ifdef CONFIG_CPA_DEBUG
	printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, start + size);
	set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);

	printk(KERN_INFO "Testing CPA: write protecting again\n");
	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
#endif
}
#endif

void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
#ifdef CONFIG_DEBUG_PAGEALLOC
	/*
	 * If debugging page accesses then do not free this memory but
	 * mark the pages not present - any buggy init-section access
	 * will create a kernel page fault:
	 */
	printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
		begin, PAGE_ALIGN(end));
	set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
#else
	unsigned long addr;

	/*
	 * We just marked the kernel text read only above; now that
	 * we are going to free part of that, we need to make it
	 * writable first.
	 */
	set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);

	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
#endif
}

void free_initmem(void)
{
	free_init_pages("unused kernel memory",
			(unsigned long)(&__init_begin),
			(unsigned long)(&__init_end));
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory", start, end);
}
#endif