/*
 * linux/arch/m68k/mm/sun3mmu.c
 *
 * Implementations of mm routines specific to the sun3 MMU.
 *
 * Moved here 8/20/1999 Sam Creasey
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bootmem.h>

#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/machdep.h>
#include <asm/io.h>

extern void mmu_emu_init (unsigned long bootmem_end);

const char bad_pmd_string[] = "Bad pmd in pte_alloc: %08lx\n";

extern unsigned long num_pages;

void free_initmem(void)
{
}

/* For the sun3 we try to follow the i386 paging_init() more closely */
/* start_mem and end_mem have PAGE_OFFSET added already */
/* now sets up tables using sun3 PTEs rather than i386 as before. --m */
void __init paging_init(void)
{
	pgd_t * pg_dir;
	pte_t * pg_table;
	int i;
	unsigned long address;
	unsigned long next_pgtable;
	unsigned long bootmem_end;
	unsigned long zones_size[MAX_NR_ZONES] = { 0, };
	unsigned long size;

#ifdef TEST_VERIFY_AREA
	wp_works_ok = 0;
#endif
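	/* grab one zeroed bootmem page to serve as empty_zero_page */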
	empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);

	address = PAGE_OFFSET;
	pg_dir = swapper_pg_dir;
	memset (swapper_pg_dir, 0, sizeof (swapper_pg_dir));
	memset (kernel_pg_dir, 0, sizeof (kernel_pg_dir));
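
	/* allocate enough page tables to hold a PTE for each of the
	 * num_pages physical pages, rounded up to a whole page */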
	size = num_pages * sizeof(pte_t);
	size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);

	next_pgtable = (unsigned long)alloc_bootmem_pages(size);
	bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK;

	/* Map whole memory from PAGE_OFFSET (0x0E000000) */
	pg_dir += PAGE_OFFSET >> PGDIR_SHIFT;

	while (address < (unsigned long)high_memory) {
		pg_table = (pte_t *) __pa (next_pgtable);
		next_pgtable += PTRS_PER_PTE * sizeof (pte_t);
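		/* the pgd entry stores the physical address of this page table */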
		pgd_val(*pg_dir) = (unsigned long) pg_table;
		pg_dir++;

		/* now change pg_table to kernel virtual addresses */
		pg_table = (pte_t *) __va ((unsigned long) pg_table);
		for (i=0; i<PTRS_PER_PTE; ++i, ++pg_table) {
			pte_t pte = pfn_pte(virt_to_pfn(address), PAGE_INIT);
			if (address >= (unsigned long)high_memory)
				pte_val (pte) = 0;
			set_pte (pg_table, pte);
			address += PAGE_SIZE;
		}
	}
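
	/* initialize the sun3 MMU emulation layer, passing the end of the
	 * page-table allocation made above */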
	mmu_emu_init(bootmem_end);

	current->mm = NULL;

	/* memory sizing is a hack stolen from motorola.c.. hope it works for us */
	zones_size[ZONE_DMA] = ((unsigned long)high_memory - PAGE_OFFSET) >> PAGE_SHIFT;

	/* I really wish I knew why the following change made things better... -- Sam */
	/* free_area_init(zones_size); */
	free_area_init_node(0, zones_size,
			    (__pa(PAGE_OFFSET) >> PAGE_SHIFT) + 1, NULL);

}