/*
 * linux/arch/sh/mm/init.c
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2002 - 2007  Paul Mundt
 *
 *  Based on linux/arch/i386/mm/init.c:
 *   Copyright (C) 1995  Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pagemap.h>
#include <linux/percpu.h>
#include <linux/io.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/cache.h>

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
pgd_t swapper_pg_dir[PTRS_PER_PGD];

void (*copy_page)(void *from, void *to);
void (*clear_page)(void *to);

void show_mem(void)
{
	int total = 0, reserved = 0, free = 0;
	int shared = 0, cached = 0, slab = 0;
	pg_data_t *pgdat;

	printk("Mem-info:\n");
	show_free_areas();

	for_each_online_pgdat(pgdat) {
		unsigned long flags, i;

		pgdat_resize_lock(pgdat, &flags);
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			struct page *page = pgdat_page_nr(pgdat, i);
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (PageSlab(page))
				slab++;
			else if (!page_count(page))
				free++;
			else
				shared += page_count(page) - 1;
		}
		pgdat_resize_unlock(pgdat, &flags);
	}

60 printk("Free swap: %6ldkB\n", nr_swap_pages
<<(PAGE_SHIFT
-10));
61 printk("%d pages of RAM\n", total
);
62 printk("%d free pages\n", free
);
63 printk("%d reserved pages\n", reserved
);
64 printk("%d slab pages\n", slab
);
65 printk("%d pages shared\n", shared
);
66 printk("%d pages swap cached\n", cached
);
67 printk(KERN_INFO
"Total of %ld pages in page table cache\n",
68 quicklist_total_size());

#ifdef CONFIG_MMU
static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		pgd_ERROR(*pgd);
		return;
	}

	pud = pud_alloc(NULL, pgd, addr);
	if (unlikely(!pud)) {
		pud_ERROR(*pud);
		return;
	}

	pmd = pmd_alloc(NULL, pud, addr);
	if (unlikely(!pmd)) {
		pmd_ERROR(*pmd);
		return;
	}

	pte = pte_offset_kernel(pmd, addr);
	if (!pte_none(*pte)) {
		pte_ERROR(*pte);
		return;
	}

	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));

	flush_tlb_one(get_asid(), addr);
}

/*
 * As a performance optimization, other platforms preserve the fixmap mapping
 * across a context switch. We don't presently do this, but this could be done
 * in a similar fashion as to the wired TLB interface that sh64 uses (by way
 * of the memory mapped UTLB configuration) -- this unfortunately forces us to
 * give up a TLB entry for each mapping we want to preserve. While this may be
 * viable for a small number of fixmaps, it's not particularly useful for
 * everything and needs to be carefully evaluated. (ie, we may want this for
 * the vsyscall page).
 *
 * XXX: Perhaps add a _PAGE_WIRED flag or something similar that we can pass
 * in at __set_fixmap() time to determine the appropriate behavior to follow.
 * (A hypothetical sketch of this idea follows __set_fixmap() below.)
 *
 *					 -- PFM.
 */
void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	set_pte_phys(address, phys, prot);
}
#endif	/* CONFIG_MMU */
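
/*
 * Hypothetical sketch only, not part of this file: roughly what the
 * _PAGE_WIRED idea from the comment above might look like. Both the
 * _PAGE_WIRED flag and the function below are made-up names; only
 * __fix_to_virt(), pgprot_val() and set_pte_phys() exist here.
 */
#if 0
static void __set_fixmap_wired(enum fixed_addresses idx, unsigned long phys,
			       pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	/* Hypothetical flag: the TLB miss path would treat PTEs marked
	 * this way as non-evictable, at the cost of one UTLB entry each. */
	pgprot_val(prot) |= _PAGE_WIRED;

	set_pte_phys(address, phys, prot);
}
#endif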

/*
 * paging_init() sets up the page tables
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	int nid;

	/* We don't need to map the kernel through the TLB, as
	 * it is permanently mapped using P1. So clear the
	 * entire pgd. (See the illustrative note after this function.) */
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	/* Set an initial value for the MMU.TTB so we don't have to
	 * check for a null value. */
	set_TTB(swapper_pg_dir);

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long low, start_pfn;

		start_pfn = pgdat->bdata->node_boot_start >> PAGE_SHIFT;
		low = pgdat->bdata->node_low_pfn;

		if (max_zone_pfns[ZONE_NORMAL] < low)
			max_zone_pfns[ZONE_NORMAL] = low;

		printk("Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
		       nid, start_pfn, low);
	}

	free_area_init_nodes(max_zone_pfns);
}

static struct kcore_list kcore_mem, kcore_vmalloc;

void __init mem_init(void)
{
	int codesize, datasize, initsize;
	int nid;

	num_physpages = 0;
	high_memory = NULL;

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long node_pages = 0;
		void *node_high_memory;

		num_physpages += pgdat->node_present_pages;

		if (pgdat->node_spanned_pages)
			node_pages = free_all_bootmem_node(pgdat);

		totalram_pages += node_pages;

		node_high_memory = (void *)__va((pgdat->node_start_pfn +
						 pgdat->node_spanned_pages) <<
						 PAGE_SHIFT);
		if (node_high_memory > high_memory)
			high_memory = node_high_memory;
	}

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);
	__flush_wback_region(empty_zero_page, PAGE_SIZE);

	/*
	 * Set up wrappers for copy/clear_page(); these will get overridden
	 * later in the boot process if a better method is available (see
	 * the sketch following mem_init() below).
	 */
#ifdef CONFIG_MMU
	copy_page = copy_page_slow;
	clear_page = clear_page_slow;
#else
	copy_page = copy_page_nommu;
	clear_page = clear_page_nommu;
#endif

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END - VMALLOC_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
	       "%dk data, %dk init)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		datasize >> 10,
		initsize >> 10);

	p3_cache_init();

	/* Initialize the vDSO */
	vsyscall_init();
}
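
/*
 * Hypothetical sketch only, not part of this file: how a later stage of
 * boot can replace the slow-path wrappers installed in mem_init() once
 * the CPU type is known. The feature test and copy_page_sq() are made-up
 * stand-ins; the real overrides live in the CPU-specific setup code.
 */
#if 0
static void __init example_select_page_ops(void)
{
	if (cpu_has_store_queues)		/* hypothetical probe */
		copy_page = copy_page_sq;	/* hypothetical SQ copy */
}
#endif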

void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
	}
	printk("Freeing unused kernel memory: %ldk freed\n",
	       ((unsigned long)&__init_end -
		(unsigned long)&__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	unsigned long p;
	for (p = start; p < end; p += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(p));
		init_page_count(virt_to_page(p));
		free_page(p);
		totalram_pages++;
	}
	printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
}
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
void online_page(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
	totalram_pages++;
	num_physpages++;
}

int arch_add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	pgdat = NODE_DATA(nid);

	/* We only have ZONE_NORMAL, so this is easy.. */
	ret = __add_pages(pgdat->node_zones + ZONE_NORMAL, start_pfn, nr_pages);
	if (unlikely(ret))
		printk("%s: Failed, __add_pages() == %d\n", __FUNCTION__, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 addr)
{
	/* Node 0 for now.. */
	return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */