/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/suspend.h>
#include <linux/lmb.h>

#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/sections.h>
#include <asm/sparsemem.h>
#include <asm/fixmap.h>

#include "mmu_decl.h"
#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE	0	/* XXX for now */
#define CPU_FTR_NOEXECUTE	0
#endif
int init_bootmem_done;
phys_addr_t memory_limit;
#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;

EXPORT_SYMBOL(kmap_prot);
EXPORT_SYMBOL(kmap_pte);
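
/*
 * Note: virt_to_kpte() below simply walks the kernel page tables
 * (pgd -> pud -> pmd -> pte) for a kernel virtual address; on 32-bit
 * PowerPC the pud and pmd levels are folded, so the walk collapses to
 * an effectively two-level lookup.
 */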
static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
			vaddr), vaddr), vaddr);
}
#endif /* CONFIG_HIGHMEM */
int page_is_ram(unsigned long pfn)
{
#ifndef CONFIG_PPC64	/* XXX for now */
	return pfn < max_pfn;
#else
	unsigned long paddr = (pfn << PAGE_SHIFT);
	int i;

	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long base;

		base = lmb.memory.region[i].base;

		if ((paddr >= base) &&
		    (paddr < (base + lmb.memory.region[i].size))) {
			return 1;
		}
	}

	return 0;
#endif
}
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

	if (!page_is_ram(pfn))
		vma_prot = pgprot_noncached(vma_prot);

	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
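
/*
 * Note: this hook is consulted when user space mmap()s physical memory
 * (e.g. through /dev/mem): RAM keeps its normal cacheable protection,
 * while anything else is mapped non-cached, unless the platform
 * (ppc_md) installs its own policy.
 */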
#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
	return hot_add_scn_to_nid(start);
}
#endif
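
/*
 * Note: arch_add_memory() below first wires the new range into the
 * kernel linear mapping (create_section_mapping()) and only then hands
 * the page range to the core VM via __add_pages().
 */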
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdata;
	struct zone *zone;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	pgdata = NODE_DATA(nid);

	start = (unsigned long)__va(start);
	create_section_mapping(start, start + size);

	/* this should work for most non-highmem platforms */
	zone = pgdata->node_zones;

	return __add_pages(nid, zone, start_pfn, nr_pages);
}
#endif /* CONFIG_MEMORY_HOTPLUG */
/*
 * walk_memory_resource() needs to make sure there are no holes in a given
 * memory range.  PPC64 does not maintain the memory layout in /proc/iomem.
 * Instead it maintains it in lmb.memory structures.  Walk through the
 * memory regions, find holes and invoke the callback for contiguous regions.
 */
int
walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
		void *arg, int (*func)(unsigned long, unsigned long, void *))
{
	struct lmb_property res;
	unsigned long pfn, len;
	u64 end;
	int ret = -1;

	res.base = (u64) start_pfn << PAGE_SHIFT;
	res.size = (u64) nr_pages << PAGE_SHIFT;

	end = res.base + res.size - 1;
	while ((res.base < end) && (lmb_find(&res) >= 0)) {
		pfn = (unsigned long)(res.base >> PAGE_SHIFT);
		len = (unsigned long)(res.size >> PAGE_SHIFT);
		ret = (*func)(pfn, len, arg);
		if (ret)
			break;
		res.base += (res.size + 1);
		res.size = (end - res.base + 1);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(walk_system_ram_range);
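
/*
 * Example (illustrative only, not part of the original file): a caller
 * passes walk_system_ram_range() a callback that is invoked once per
 * contiguous chunk of RAM.  The helper names below are hypothetical.
 */
#if 0
static int count_ram_pages_cb(unsigned long pfn, unsigned long nr_pages,
			      void *arg)
{
	unsigned long *total = arg;	/* accumulator supplied by caller */

	*total += nr_pages;
	return 0;			/* non-zero would abort the walk */
}

static unsigned long count_ram_pages(unsigned long start_pfn,
				     unsigned long nr_pages)
{
	unsigned long total = 0;

	walk_system_ram_range(start_pfn, nr_pages, &total,
			      count_ram_pages_cb);
	return total;
}
#endif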
/*
 * Initialize the bootmem system and give it all the memory we
 * have available.  If we are using highmem, we only put the
 * lowmem into the bootmem system.
 */
#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init do_init_bootmem(void)
{
	unsigned long i;
	unsigned long start, bootmap_pages;
	unsigned long total_pages;
	int boot_mapsize;

	max_low_pfn = max_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
	total_pages = (lmb_end_of_DRAM() - memstart_addr) >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	total_pages = total_lowmem >> PAGE_SHIFT;
	max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif
	/*
	 * Find an area to use for the bootmem bitmap.  Calculate the size of
	 * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
	 * Add 1 additional page in case the address isn't page-aligned.
	 */
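	/*
	 * Worked example (illustrative): with 512MB of RAM and 4KB pages
	 * there are 131072 page frames, so the bitmap needs 131072 / 8 =
	 * 16KB, i.e. 4 pages, with the extra page covering a start address
	 * that is not page-aligned.
	 */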
	bootmap_pages = bootmem_bootmap_pages(total_pages);

	start = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);

	min_low_pfn = MEMORY_START >> PAGE_SHIFT;
	boot_mapsize = init_bootmem_node(NODE_DATA(0), start >> PAGE_SHIFT,
					 min_low_pfn, max_low_pfn);
	/* Add active regions with valid PFNs */
	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long start_pfn, end_pfn;
		start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
		end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
		add_active_range(0, start_pfn, end_pfn);
	}
	/* Add all physical memory to the bootmem map, mark each area
	 * present.
	 */
#ifdef CONFIG_HIGHMEM
	free_bootmem_with_active_regions(0, lowmem_end_addr >> PAGE_SHIFT);

	/* reserve the sections we're already using */
	for (i = 0; i < lmb.reserved.cnt; i++) {
		unsigned long addr = lmb.reserved.region[i].base +
				     lmb_size_bytes(&lmb.reserved, i) - 1;
		if (addr < lowmem_end_addr)
			reserve_bootmem(lmb.reserved.region[i].base,
					lmb_size_bytes(&lmb.reserved, i),
					BOOTMEM_DEFAULT);
		else if (lmb.reserved.region[i].base < lowmem_end_addr) {
			unsigned long adjusted_size = lowmem_end_addr -
				      lmb.reserved.region[i].base;
			reserve_bootmem(lmb.reserved.region[i].base,
					adjusted_size, BOOTMEM_DEFAULT);
		}
	}
#else
	free_bootmem_with_active_regions(0, max_pfn);

	/* reserve the sections we're already using */
	for (i = 0; i < lmb.reserved.cnt; i++)
		reserve_bootmem(lmb.reserved.region[i].base,
				lmb_size_bytes(&lmb.reserved, i),
				BOOTMEM_DEFAULT);
#endif
	/* XXX need to clip this if using highmem? */
	sparse_memory_present_with_active_regions(0);

	init_bootmem_done = 1;
}
/* mark pages that don't exist as nosave */
static int __init mark_nonram_nosave(void)
{
	unsigned long lmb_next_region_start_pfn,
		      lmb_region_max_pfn;
	int i;

	for (i = 0; i < lmb.memory.cnt - 1; i++) {
		lmb_region_max_pfn =
			(lmb.memory.region[i].base >> PAGE_SHIFT) +
			(lmb.memory.region[i].size >> PAGE_SHIFT);
		lmb_next_region_start_pfn =
			lmb.memory.region[i+1].base >> PAGE_SHIFT;

		if (lmb_region_max_pfn < lmb_next_region_start_pfn)
			register_nosave_region(lmb_region_max_pfn,
					       lmb_next_region_start_pfn);
	}

	return 0;
}
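
/*
 * Example (illustrative): with two lmb regions 0x00000000..0x1fffffff
 * and 0x40000000..0x7fffffff, the pfn range covering the 512MB hole
 * between them is registered as nosave, so hibernation never tries to
 * save or restore page frames that do not exist.
 */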
/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long total_ram = lmb_phys_mem_size();
	phys_addr_t top_of_ram = lmb_end_of_DRAM();
	unsigned long max_zone_pfns[MAX_NR_ZONES];

#ifdef CONFIG_PPC32
	unsigned long v = __fix_to_virt(__end_of_fixed_addresses - 1);
	unsigned long end = __fix_to_virt(FIX_HOLE);

	for (; v < end; v += PAGE_SIZE)
		map_page(v, 0, 0);	/* XXX gross */
#endif
#ifdef CONFIG_HIGHMEM
	map_page(PKMAP_BASE, 0, 0);	/* XXX gross */
	pkmap_page_table = virt_to_kpte(PKMAP_BASE);

	kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
	kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */
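	/*
	 * Note: caching kmap_pte and kmap_prot above lets kmap_atomic()
	 * install highmem mappings later without re-walking the kernel
	 * page tables on every call.
	 */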
	printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%lx\n",
	       (unsigned long long)top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (long int)((top_of_ram - total_ram) >> 20));
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_DMA] = lowmem_end_addr >> PAGE_SHIFT;
	max_zone_pfns[ZONE_HIGHMEM] = top_of_ram >> PAGE_SHIFT;
#else
	max_zone_pfns[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
#endif
	free_area_init_nodes(max_zone_pfns);

	mark_nonram_nosave();
}
#endif /* ! CONFIG_NEED_MULTIPLE_NODES */
void __init mem_init(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int nid;
#endif
	pg_data_t *pgdat;
	unsigned long i;
	struct page *page;
	unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;

	num_physpages = lmb.memory.size >> PAGE_SHIFT;
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

#ifdef CONFIG_NEED_MULTIPLE_NODES
	for_each_online_node(nid) {
		if (NODE_DATA(nid)->node_spanned_pages != 0) {
			printk("freeing bootmem node %d\n", nid);
			totalram_pages +=
				free_all_bootmem_node(NODE_DATA(nid));
		}
	}
#else
	max_mapnr = max_pfn;
	totalram_pages += free_all_bootmem();
#endif
	for_each_online_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			if (!pfn_valid(pgdat->node_start_pfn + i))
				continue;
			page = pgdat_page_nr(pgdat, i);
			if (PageReserved(page))
				reservedpages++;
		}
	}

	codesize = (unsigned long)&_sdata - (unsigned long)&_stext;
	datasize = (unsigned long)&_edata - (unsigned long)&_sdata;
	initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
	bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;
#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn, highmem_mapnr;

		highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			struct page *page = pfn_to_page(pfn);
			if (lmb_is_reserved(pfn << PAGE_SHIFT))
				continue;
			ClearPageReserved(page);
			init_page_count(page);
			__free_page(page);
			totalhigh_pages++;
			reservedpages--;
		}
		totalram_pages += totalhigh_pages;
		printk(KERN_DEBUG "High memory: %luk\n",
		       totalhigh_pages << (PAGE_SHIFT-10));
	}
#endif /* CONFIG_HIGHMEM */
	printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
	       "%luk reserved, %luk data, %luk bss, %luk init)\n",
		nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		bsssize >> 10,
		initsize >> 10);
#ifdef CONFIG_PPC32
	pr_info("Kernel virtual memory layout:\n");
	pr_info("  * 0x%08lx..0x%08lx  : fixmap\n", FIXADDR_START, FIXADDR_TOP);
#ifdef CONFIG_HIGHMEM
	pr_info("  * 0x%08lx..0x%08lx  : highmem PTEs\n",
		PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
#endif /* CONFIG_HIGHMEM */
#ifdef CONFIG_NOT_COHERENT_CACHE
	pr_info("  * 0x%08lx..0x%08lx  : consistent mem\n",
		IOREMAP_TOP, IOREMAP_TOP + CONFIG_CONSISTENT_SIZE);
#endif /* CONFIG_NOT_COHERENT_CACHE */
	pr_info("  * 0x%08lx..0x%08lx  : early ioremap\n",
		ioremap_bot, IOREMAP_TOP);
	pr_info("  * 0x%08lx..0x%08lx  : vmalloc & ioremap\n",
		VMALLOC_START, VMALLOC_END);
#endif /* CONFIG_PPC32 */
}
/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &page->flags))
		clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);
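
/*
 * Note: PG_arch_1 is used on powerpc as an "i-cache clean" marker.
 * Clearing it above defers the real i-cache flush until the page is
 * next exposed to user space, at which point
 * flush_dcache_icache_page() below does the actual work.
 */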
void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_BOOKE
	void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE);
	__flush_dcache_icache(start);
	kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
#elif defined(CONFIG_8xx) || defined(CONFIG_PPC64)
	/* On 8xx there is no need to kmap since highmem is not supported */
	__flush_dcache_icache(page_address(page));
#else
	__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
#endif
}
void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	clear_page(page);

	/*
	 * We shouldn't have to do this, but some versions of glibc
	 * require it (ld.so assumes zero filled pages are icache clean)
	 */
	flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	copy_page(vto, vfrom);

	/*
	 * We should be able to use the following optimisation, however
	 * there are two problems.
	 * Firstly a bug in some versions of binutils meant PLT sections
	 * were not marked executable.
	 * Secondly the first word in the GOT section is blrl, used
	 * to establish the GOT address.  Until recently the GOT was
	 * not marked executable.
	 */
#if 0
	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
		return;
#endif

	flush_dcache_page(pg);
}
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	unsigned long maddr;

	maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
	flush_icache_range(maddr, maddr + len);
	kunmap(page);
}
EXPORT_SYMBOL(flush_icache_user_range);
/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t pte)
{
#ifdef CONFIG_PPC_STD_MMU
	unsigned long access = 0, trap;

	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
	if (!pte_young(pte) || address >= TASK_SIZE)
		return;

	/* We try to figure out if we are coming from an instruction
	 * access fault and pass that down to __hash_page so we avoid
	 * double-faulting on execution of fresh text. We have to test
	 * for regs NULL since init will get here first thing at boot.
	 *
	 * We also avoid filling the hash if not coming from a fault.
	 */
	if (current->thread.regs == NULL)
		return;
	trap = TRAP(current->thread.regs);
	if (trap == 0x400)
		access |= _PAGE_EXEC;
	else if (trap != 0x300)
		return;
	hash_preload(vma->vm_mm, address, access, trap);
#endif /* CONFIG_PPC_STD_MMU */
}