/* $Id: init.c,v 1.19 2004/02/21 04:42:16 kkojima Exp $
 *
 *  linux/arch/sh/mm/init.c
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2002, 2004  Paul Mundt
 *
 *  Based on linux/arch/i386/mm/init.c:
 *   Copyright (C) 1995  Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/cache.h>

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
pgd_t swapper_pg_dir[PTRS_PER_PGD];

/*
 * Cache of MMU context last used.
 */
unsigned long mmu_context_cache = NO_CONTEXT;

#ifdef CONFIG_MMU
/* It'd be good if these lines were in the standard header file. */
#define START_PFN	(NODE_DATA(0)->bdata->node_boot_start >> PAGE_SHIFT)
#define MAX_LOW_PFN	(NODE_DATA(0)->bdata->node_low_pfn)
#endif

void (*copy_page)(void *from, void *to);
void (*clear_page)(void *to);

void show_mem(void)
{
	int i, total = 0, reserved = 0;
	int shared = 0, cached = 0;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap: %6ldkB\n", nr_swap_pages << (PAGE_SHIFT-10));
	i = max_mapnr;
	while (i-- > 0) {
		total++;
		if (PageReserved(mem_map + i))
			reserved++;
		else if (PageSwapCache(mem_map + i))
			cached++;
		else if (page_count(mem_map + i))
			shared += page_count(mem_map + i) - 1;
	}
	printk("%d pages of RAM\n", total);
	printk("%d reserved pages\n", reserved);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
}

static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = swapper_pg_dir + pgd_index(addr);
	if (pgd_none(*pgd)) {
		pgd_ERROR(*pgd);
		return;
	}

	/* Allocate the pmd level if this pud slot is still empty. */
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud)) {
		pmd = (pmd_t *)get_zeroed_page(GFP_ATOMIC);
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
		if (pmd != pmd_offset(pud, 0)) {
			pud_ERROR(*pud);
			return;
		}
	}

	/* Likewise for the pte level under this pmd. */
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		pte = (pte_t *)get_zeroed_page(GFP_ATOMIC);
		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
		if (pte != pte_offset_kernel(pmd, 0)) {
			pmd_ERROR(*pmd);
			return;
		}
	}

	pte = pte_offset_kernel(pmd, addr);
	if (!pte_none(*pte)) {
		pte_ERROR(*pte);
		return;
	}

	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));

	__flush_tlb_page(get_asid(), addr);
}

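/*
 * Note that set_pte_phys() allocates any missing pmd/pte levels on
 * demand, so __set_fixmap() below can safely run before the kernel
 * page tables have been fully populated.
 */
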
/*
 * As a performance optimization, other platforms preserve the fixmap mapping
 * across a context switch; we don't presently do this, but this could be done
 * in a similar fashion to the wired TLB interface that sh64 uses (by way of
 * the memory-mapped UTLB configuration) -- this unfortunately forces us to
 * give up a TLB entry for each mapping we want to preserve. While this may be
 * viable for a small number of fixmaps, it's not particularly useful for
 * everything and needs to be carefully evaluated. (ie, we may want this for
 * the vsyscall page).
 *
 * XXX: Perhaps add a _PAGE_WIRED flag or something similar that we can pass
 * in at __set_fixmap() time to determine the appropriate behavior to follow.
 * (A sketch of that idea follows __set_fixmap() below.)
 */
139 void __set_fixmap(enum fixed_addresses idx
, unsigned long phys
, pgprot_t prot
)
141 unsigned long address
= __fix_to_virt(idx
);
143 if (idx
>= __end_of_fixed_addresses
) {
148 set_pte_phys(address
, phys
, prot
);
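/*
 * Sketch of the _PAGE_WIRED idea mentioned above, for illustration only:
 * _PAGE_WIRED (and its value) is hypothetical and defined nowhere else,
 * which is why this block is compiled out. The model is sh64's wired-UTLB
 * handling.
 */
#if 0
#define _PAGE_WIRED	0x4000	/* hypothetical "don't evict" bit */

static inline void __set_fixmap_wired(enum fixed_addresses idx,
				      unsigned long phys, pgprot_t prot)
{
	/*
	 * A wired fixmap would survive context switches, at the cost of
	 * permanently occupying one TLB entry.
	 */
	__set_fixmap(idx, phys, __pgprot(pgprot_val(prot) | _PAGE_WIRED));
}
#endif
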
/* References to section boundaries */

extern char _text, _etext, _edata, __bss_start, _end;
extern char __init_begin, __init_end;

/*
 * paging_init() sets up the page tables.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES] = { 0, };

	/*
	 * Setup some defaults for the zone sizes.. these should be safe
	 * regardless of discontiguous memory or MMU settings.
	 */
	zones_size[ZONE_DMA] = 0 >> PAGE_SHIFT;
	zones_size[ZONE_NORMAL] = __MEMORY_SIZE >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	zones_size[ZONE_HIGHMEM] = 0 >> PAGE_SHIFT;
#endif

#if defined(CONFIG_MMU)
	/*
	 * If we have an MMU, and want to be using it .. we need to adjust
	 * the zone sizes accordingly, in addition to turning it on.
	 */
	{
		unsigned long max_dma, low, start_pfn;
		pgd_t *pg_dir;
		int i;

		/* We don't need kernel mapping as the hardware supports that. */
		pg_dir = swapper_pg_dir;

		/* Invalidate every kernel page directory entry. */
		for (i = 0; i < PTRS_PER_PGD; i++)
			pgd_val(pg_dir[i]) = 0;

		/* Turn on the MMU */
		enable_mmu();

		/* Fixup the zone sizes */
		start_pfn = START_PFN;
		max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
		low = MAX_LOW_PFN;

		if (low < max_dma) {
			/* All of low memory is DMA-capable. */
			zones_size[ZONE_DMA] = low - start_pfn;
			zones_size[ZONE_NORMAL] = 0;
		} else {
			zones_size[ZONE_DMA] = max_dma - start_pfn;
			zones_size[ZONE_NORMAL] = low - max_dma;
		}
	}

#elif defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4)
	/*
	 * If we don't have CONFIG_MMU set and the processor in question
	 * still has an MMU, care needs to be taken to make sure it doesn't
	 * stay on.. Since the boot loader could have potentially already
	 * turned it on, and we clearly don't want it, we simply turn it off.
	 *
	 * We don't need to do anything special for the zone sizes, since the
	 * default values that were already configured up above should be
	 * satisfactory.
	 */
	disable_mmu();
#endif

	NODE_DATA(0)->node_mem_map = NULL;
	free_area_init_node(0, NODE_DATA(0), zones_size,
			    __MEMORY_START >> PAGE_SHIFT, 0);
}

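/*
 * Worked example of the zone fixup above, with hypothetical PFNs: for
 * start_pfn = 0x1000, low = 0x4000, and max_dma = 0x5000, all of low
 * memory is DMA-capable, so ZONE_DMA gets low - start_pfn = 0x3000
 * pages and ZONE_NORMAL stays empty. With low = 0x8000 instead,
 * ZONE_DMA gets max_dma - start_pfn = 0x4000 pages and ZONE_NORMAL
 * the remaining low - max_dma = 0x3000.
 */
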
static struct kcore_list kcore_mem, kcore_vmalloc;

void __init mem_init(void)
{
	extern unsigned long empty_zero_page[1024];
	int codesize, reservedpages, datasize, initsize;
	int tmp;
	extern unsigned long memory_start;

#ifdef CONFIG_MMU
	high_memory = (void *)__va(MAX_LOW_PFN * PAGE_SIZE);
#else
	extern unsigned long memory_end;

	high_memory = (void *)(memory_end & PAGE_MASK);
#endif

	max_mapnr = num_physpages = MAP_NR(high_memory) - MAP_NR(memory_start);

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);
	__flush_wback_region(empty_zero_page, PAGE_SIZE);

	/*
	 * Setup wrappers for copy/clear_page(); these will get overridden
	 * later in the boot process if a better method is available.
	 */
#ifdef CONFIG_MMU
	copy_page = copy_page_slow;
	clear_page = clear_page_slow;
#else
	copy_page = copy_page_nommu;
	clear_page = clear_page_nommu;
#endif
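	/*
	 * (Illustration, not original code: a later boot step installs a
	 * faster variant by plain reassignment, e.g. pointing copy_page
	 * at an SH-4-optimized copy_page_sh4(), where the CPU support
	 * code provides one.)
	 */
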
	/* this will put all low memory onto the freelists */
	totalram_pages += free_all_bootmem_node(NODE_DATA(0));
	reservedpages = 0;
	for (tmp = 0; tmp < num_physpages; tmp++)
		/*
		 * Only count reserved RAM pages
		 */
		if (PageReserved(mem_map + tmp))
			reservedpages++;

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END - VMALLOC_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
	       "%dk reserved, %dk data, %dk init)\n",
	       (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
	       max_mapnr << (PAGE_SHIFT-10),
	       codesize >> 10,
	       reservedpages << (PAGE_SHIFT-10),
	       datasize >> 10,
	       initsize >> 10);

	p3_cache_init();

	/* Initialize the vDSO */
	vsyscall_init();
}

void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
	}
	printk("Freeing unused kernel memory: %dk freed\n",
	       (&__init_end - &__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	unsigned long p;

	for (p = start; p < end; p += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(p));
		init_page_count(virt_to_page(p));
		free_page(p);
		totalram_pages++;
	}
	printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
}
#endif