/*
 * linux/arch/sh/mm/init.c
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2002 - 2007  Paul Mundt
 *
 *  Based on linux/arch/i386/mm/init.c:
 *   Copyright (C) 1995  Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pagemap.h>
#include <linux/percpu.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/cache.h>

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/* The kernel's reference page table, loaded into MMU.TTB by paging_init(). */
pgd_t swapper_pg_dir[PTRS_PER_PGD];

#ifdef CONFIG_SUPERH32
/*
 * Handle trivial transitions between cached and uncached
 * segments, making use of the 1:1 mapping relationship in
 * 512MB lowmem.
 *
 * This is the offset of the uncached section from its cached alias.
 * The default value is only valid in 29-bit mode; in 32-bit mode it
 * is overridden in pmb_init().
 */
unsigned long cached_to_uncached = P2SEG - P1SEG;
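
/*
 * Illustrative sketch (not from the original source): with the 1:1 P1/P2
 * aliasing, this offset lets a cached kernel address be converted to its
 * uncached alias with plain pointer arithmetic, along the lines of:
 *
 *	void *uncached = (void *)((unsigned long)cached + cached_to_uncached);
 */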
#endif

#ifdef CONFIG_MMU
/*
 * Install a single kernel PTE mapping addr to phys with the given
 * protection, complaining if an intermediate level is missing or the
 * slot is already taken.
 */
static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		pgd_ERROR(*pgd);
		return;
	}

	pud = pud_alloc(NULL, pgd, addr);
	if (unlikely(!pud)) {
		pud_ERROR(*pud);
		return;
	}

	pmd = pmd_alloc(NULL, pud, addr);
	if (unlikely(!pmd)) {
		pmd_ERROR(*pmd);
		return;
	}

	pte = pte_offset_kernel(pmd, addr);
	if (!pte_none(*pte)) {
		pte_ERROR(*pte);
		return;
	}

	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));

	/* Flush any stale translation for this address in the current ASID. */
	local_flush_tlb_one(get_asid(), addr);
}

/*
 * As a performance optimization, other platforms preserve the fixmap mapping
 * across a context switch; we don't presently do this, but this could be done
 * in a similar fashion as to the wired TLB interface that sh64 uses (by way
 * of the memory mapped UTLB configuration) -- this unfortunately forces us to
 * give up a TLB entry for each mapping we want to preserve. While this may be
 * viable for a small number of fixmaps, it's not particularly useful for
 * everything and needs to be carefully evaluated. (ie, we may want this for
 * the fixmap, but not the vmalloc area).
 *
 * XXX: Perhaps add a _PAGE_WIRED flag or something similar that we can pass
 * in at __set_fixmap() time to determine the appropriate behavior to follow.
 */
void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	set_pte_phys(address, phys, prot);
}
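
/*
 * Usage sketch (illustrative; FIX_EXAMPLE is a hypothetical fixmap index,
 * not one defined by this port):
 *
 *	__set_fixmap(FIX_EXAMPLE, phys, PAGE_KERNEL);
 *	ptr = (void *)fix_to_virt(FIX_EXAMPLE);
 */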

void __init page_table_range_init(unsigned long start, unsigned long end,
					 pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = __pgd_offset(vaddr);
	j = __pud_offset(vaddr);
	k = __pmd_offset(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
			pmd = (pmd_t *)pud;
			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
				if (pmd_none(*pmd)) {
					/* Back this slot with a zeroed
					 * bootmem page table. */
					pte = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
					pmd_populate_kernel(&init_mm, pmd, pte);
					BUG_ON(pte != pte_offset_kernel(pmd, 0));
				}
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
}
#endif	/* CONFIG_MMU */

/*
 * paging_init() sets up the page tables
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long vaddr, end;
	int nid;

	/* We don't need to map the kernel through the TLB, as
	 * it is permanently mapped using P1. So clear the
	 * entire pgd. */
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	/* Set an initial value for the MMU.TTB so we don't have to
	 * check for a null value. */
	set_TTB(swapper_pg_dir);

	/*
	 * Populate the relevant portions of swapper_pg_dir so that
	 * we can use the fixmap entries without calling kmalloc.
	 * PTEs will be filled in by __set_fixmap().
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, swapper_pg_dir);

	kmap_coherent_init();

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long low, start_pfn;

		start_pfn = pgdat->bdata->node_min_pfn;
		low = pgdat->bdata->node_low_pfn;

		if (max_zone_pfns[ZONE_NORMAL] < low)
			max_zone_pfns[ZONE_NORMAL] = low;

		printk("Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
		       nid, start_pfn, low);
	}

	free_area_init_nodes(max_zone_pfns);

	/* Set up the uncached fixmap */
	set_fixmap_nocache(FIX_UNCACHED, __pa(&__uncached_start));
}
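
/*
 * Illustrative note (assumption, not from the original source): once the
 * uncached fixmap is in place, the uncached alias of the .uncached section
 * can be reached through the fixmap API, e.g.:
 *
 *	void *u = (void *)fix_to_virt(FIX_UNCACHED);
 */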

/*
 * Early initialization for any I/O MMUs we might have.
 */
static void __init iommu_init(void)
{
	no_iommu_init();
}

void __init mem_init(void)
{
	int codesize, datasize, initsize;
	int nid;

	iommu_init();

	num_physpages = 0;
	high_memory = NULL;

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long node_pages = 0;
		void *node_high_memory;

		num_physpages += pgdat->node_present_pages;

		if (pgdat->node_spanned_pages)
			node_pages = free_all_bootmem_node(pgdat);

		totalram_pages += node_pages;

		node_high_memory = (void *)__va((pgdat->node_start_pfn +
						 pgdat->node_spanned_pages) <<
						 PAGE_SHIFT);
		if (node_high_memory > high_memory)
			high_memory = node_high_memory;
	}

	/* Set this up early, so we can take care of the zero page */
	cpu_cache_init();

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);
	__flush_wback_region(empty_zero_page, PAGE_SIZE);
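
	/*
	 * Descriptive note (assumption about intent): the explicit writeback
	 * pushes the zeroed lines out of the cache so that aliased or
	 * uncached views of the zero page also observe the cleared data.
	 */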

	codesize  = (unsigned long) &_etext - (unsigned long) &_text;
	datasize  = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize  = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	/* Shifting by (PAGE_SHIFT - 10) converts a page count to kilobytes. */
	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
	       "%dk data, %dk init)\n",
		nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		datasize >> 10,
		initsize >> 10);

	/* Initialize the vDSO */
	vsyscall_init();
}

void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
	}
	printk("Freeing unused kernel memory: %ldk freed\n",
	       ((unsigned long)&__init_end -
		(unsigned long)&__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	unsigned long p;

	for (p = start; p < end; p += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(p));
		init_page_count(virt_to_page(p));
		free_page(p);
		totalram_pages++;
	}
	printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
}
#endif
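
/*
 * When the kernel stack is smaller than a page (THREAD_SHIFT < PAGE_SHIFT),
 * a full page per thread_info would waste memory, so the allocators below
 * are backed by a dedicated THREAD_SIZE-aligned slab cache instead.
 */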
#if THREAD_SHIFT < PAGE_SHIFT
static struct kmem_cache *thread_info_cache;

struct thread_info *alloc_thread_info(struct task_struct *tsk)
{
	struct thread_info *ti;

	ti = kmem_cache_alloc(thread_info_cache, GFP_KERNEL);
	if (unlikely(ti == NULL))
		return NULL;
#ifdef CONFIG_DEBUG_STACK_USAGE
	/* Zero the stack so the debug code can measure its high-water mark. */
	memset(ti, 0, THREAD_SIZE);
#endif
	return ti;
}

void free_thread_info(struct thread_info *ti)
{
	kmem_cache_free(thread_info_cache, ti);
}

void thread_info_cache_init(void)
{
	thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
					      THREAD_SIZE, 0, NULL);
	BUG_ON(thread_info_cache == NULL);
}
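
/*
 * Note (assumption based on the generic kernel of this era): nothing in
 * this file calls thread_info_cache_init(); it overrides a weak stub and
 * is expected to run once from start_kernel() during early boot.
 */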
#endif /* THREAD_SHIFT < PAGE_SHIFT */

#ifdef CONFIG_MEMORY_HOTPLUG
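/*
 * Called via the generic memory hotplug path (add_memory()) to hand a
 * newly added physical memory range to the architecture.
 */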
int arch_add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	pgdat = NODE_DATA(nid);

	/* We only have ZONE_NORMAL, so this is easy.. */
	ret = __add_pages(nid, pgdat->node_zones + ZONE_NORMAL,
				start_pfn, nr_pages);
	if (unlikely(ret))
		printk("%s: Failed, __add_pages() == %d\n", __func__, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

int memory_add_physaddr_to_nid(u64 addr)
{
	/* Node 0 for now.. */
	return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);

#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_PMB
int __in_29bit_mode(void)
{
	/* The SE bit in PASCR selects 32-bit (PMB) address mode. */
	return !(ctrl_inl(PMB_PASCR) & PASCR_SE);
}
#endif /* CONFIG_PMB */