/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/suspend.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/sparsemem.h>
#include <asm/vdso.h>
#include <asm/fixmap.h>
#include <asm/swiotlb.h>
#include <asm/rtas.h>

#include "mmu_decl.h"
#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE	0	/* XXX for now */
#define CPU_FTR_NOEXECUTE	0
#endif

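/* Limit on usable RAM, set with the mem= command-line option. */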
unsigned long long memory_limit;

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
EXPORT_SYMBOL(kmap_pte);
pgprot_t kmap_prot;
EXPORT_SYMBOL(kmap_prot);

static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
			vaddr), vaddr), vaddr);
}
#endif

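/*
 * Return whether the given pfn is backed by system RAM: a simple
 * max_pfn check on 32-bit, a walk of the memblock regions on 64-bit.
 */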
int page_is_ram(unsigned long pfn)
{
#ifndef CONFIG_PPC64	/* XXX for now */
	return pfn < max_pfn;
#else
	unsigned long paddr = (pfn << PAGE_SHIFT);
	struct memblock_region *reg;

	for_each_memblock(memory, reg)
		if (paddr >= reg->base && paddr < (reg->base + reg->size))
			return 1;
	return 0;
#endif
}

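/*
 * Select the page protection for a physical mapping (e.g. /dev/mem):
 * use the platform hook if one exists, otherwise map anything that is
 * not RAM as non-cached.
 */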
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

	if (!page_is_ram(pfn))
		vma_prot = pgprot_noncached(vma_prot);

	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
	return hot_add_scn_to_nid(start);
}
#endif

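/*
 * Hot-plug a memory range: create the linear mapping for it, then
 * hand the new pages to the core VM.
 */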
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdata;
	struct zone *zone;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	pgdata = NODE_DATA(nid);

	start = (unsigned long)__va(start);
	if (create_section_mapping(start, start + size))
		return -EINVAL;

	/* this should work for most non-highmem platforms */
	zone = pgdata->node_zones +
		zone_for_memory(nid, start, size, 0);

	return __add_pages(nid, zone, start_pfn, nr_pages);
}

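/*
 * Hot-unplug a memory range: pull the pages from the core VM, then
 * tear down the bolted hash table mappings for the range.
 */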
#ifdef CONFIG_MEMORY_HOTREMOVE
int arch_remove_memory(u64 start, u64 size)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct zone *zone;
	int ret;

	zone = page_zone(pfn_to_page(start_pfn));
	ret = __remove_pages(zone, start_pfn, nr_pages);
	if (ret)
		return ret;

	/* Remove htab bolted mappings for this section of memory */
	start = (unsigned long)__va(start);
	ret = remove_section_mapping(start, start + size);

	/*
	 * Ensure all vmalloc mappings are flushed in case they also
	 * hit that section of memory.
	 */
	vm_unmap_aliases();

	return ret;
}
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */

/*
 * walk_system_ram_range() needs to make sure there are no holes in a given
 * memory range.  PPC64 does not maintain the memory layout in /proc/iomem.
 * Instead it maintains it in memblock.memory structures.  Walk through the
 * memory regions, find holes and callback for contiguous regions.
 */
int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
			  void *arg,
			  int (*func)(unsigned long, unsigned long, void *))
{
	struct memblock_region *reg;
	unsigned long end_pfn = start_pfn + nr_pages;
	unsigned long tstart, tend;
	int ret = -1;

	for_each_memblock(memory, reg) {
		tstart = max(start_pfn, memblock_region_memory_base_pfn(reg));
		tend = min(end_pfn, memblock_region_memory_end_pfn(reg));
		if (tstart >= tend)
			continue;
		ret = (*func)(tstart, tend - tstart, arg);
		if (ret)
			break;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(walk_system_ram_range);

#ifndef CONFIG_NEED_MULTIPLE_NODES
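/*
 * Derive the pfn limits from the memblock view of memory and set up
 * sparsemem for the flat (single-node) configuration.
 */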
void __init initmem_init(void)
{
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = MEMORY_START >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif

	/* Place all memblock_regions in the same node and merge contiguous
	 * memblock_regions
	 */
	memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);

	/* XXX need to clip this if using highmem? */
	sparse_memory_present_with_active_regions(0);
	sparse_init();
}

/* mark pages that don't exist as nosave */
static int __init mark_nonram_nosave(void)
{
	struct memblock_region *reg, *prev = NULL;

	for_each_memblock(memory, reg) {
		if (prev &&
		    memblock_region_memory_end_pfn(prev) < memblock_region_memory_base_pfn(reg))
			register_nosave_region(memblock_region_memory_end_pfn(prev),
					       memblock_region_memory_base_pfn(reg));
		prev = reg;
	}
	return 0;
}
#else /* CONFIG_NEED_MULTIPLE_NODES */
static int __init mark_nonram_nosave(void)
{
	return 0;
}
#endif

static bool zone_limits_final;

static unsigned long max_zone_pfns[MAX_NR_ZONES] = {
	[0 ... MAX_NR_ZONES - 1] = ~0UL
};

/*
 * Restrict the specified zone and all more restrictive zones
 * to be below the specified pfn.  May not be called after
 * paging_init().
 */
void __init limit_zone_pfn(enum zone_type zone, unsigned long pfn_limit)
{
	int i;

	if (WARN_ON(zone_limits_final))
		return;

	for (i = zone; i >= 0; i--) {
		if (max_zone_pfns[i] > pfn_limit)
			max_zone_pfns[i] = pfn_limit;
	}
}

/*
 * Find the least restrictive zone that is entirely below the
 * specified pfn limit.  Returns < 0 if no suitable zone is found.
 *
 * pfn_limit must be u64 because it can exceed 32 bits even on 32-bit
 * systems -- the DMA limit can be higher than any possible real pfn.
 */
int dma_pfn_limit_to_zone(u64 pfn_limit)
{
	enum zone_type top_zone = ZONE_NORMAL;
	int i;

#ifdef CONFIG_HIGHMEM
	top_zone = ZONE_HIGHMEM;
#endif

	for (i = top_zone; i >= 0; i--) {
		if (max_zone_pfns[i] <= pfn_limit)
			return i;
	}

	return -EPERM;
}

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long long total_ram = memblock_phys_mem_size();
	phys_addr_t top_of_ram = memblock_end_of_DRAM();
	enum zone_type top_zone;

#ifdef CONFIG_PPC32
	unsigned long v = __fix_to_virt(__end_of_fixed_addresses - 1);
	unsigned long end = __fix_to_virt(FIX_HOLE);

	for (; v < end; v += PAGE_SIZE)
		map_page(v, 0, 0); /* XXX gross */
#endif

#ifdef CONFIG_HIGHMEM
	map_page(PKMAP_BASE, 0, 0);	/* XXX gross */
	pkmap_page_table = virt_to_kpte(PKMAP_BASE);

	kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
	kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */

	printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n",
	       (unsigned long long)top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (long int)((top_of_ram - total_ram) >> 20));

#ifdef CONFIG_HIGHMEM
	top_zone = ZONE_HIGHMEM;
	limit_zone_pfn(ZONE_NORMAL, lowmem_end_addr >> PAGE_SHIFT);
#else
	top_zone = ZONE_NORMAL;
#endif

	limit_zone_pfn(top_zone, top_of_ram >> PAGE_SHIFT);
	zone_limits_final = true;
	free_area_init_nodes(max_zone_pfns);

	mark_nonram_nosave();
}

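/*
 * Late memory setup: hand boot memory over to the buddy allocator,
 * release highmem pages, and report the kernel virtual memory layout.
 */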
void __init mem_init(void)
{
	/*
	 * book3s is limited to 16 page sizes due to encoding this in
	 * a 4-bit field for slices.
	 */
	BUILD_BUG_ON(MMU_PAGE_COUNT > 16);

#ifdef CONFIG_SWIOTLB
	swiotlb_init(0);
#endif

	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
	set_max_mapnr(max_pfn);
	free_all_bootmem();

#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn, highmem_mapnr;

		highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT;
			struct page *page = pfn_to_page(pfn);
			if (!memblock_is_reserved(paddr))
				free_highmem_page(page);
		}
	}
#endif /* CONFIG_HIGHMEM */

#if defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_SMP)
	/*
	 * If smp is enabled, next_tlbcam_idx is initialized in the cpu up
	 * functions.... do it here for the non-smp case.
	 */
	per_cpu(next_tlbcam_idx, smp_processor_id()) =
		(mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif

	mem_init_print_info(NULL);
#ifdef CONFIG_PPC32
	pr_info("Kernel virtual memory layout:\n");
	pr_info("  * 0x%08lx..0x%08lx  : fixmap\n", FIXADDR_START, FIXADDR_TOP);
#ifdef CONFIG_HIGHMEM
	pr_info("  * 0x%08lx..0x%08lx  : highmem PTEs\n",
		PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
#endif /* CONFIG_HIGHMEM */
#ifdef CONFIG_NOT_COHERENT_CACHE
	pr_info("  * 0x%08lx..0x%08lx  : consistent mem\n",
		IOREMAP_TOP, IOREMAP_TOP + CONFIG_CONSISTENT_SIZE);
#endif /* CONFIG_NOT_COHERENT_CACHE */
	pr_info("  * 0x%08lx..0x%08lx  : early ioremap\n",
		ioremap_bot, IOREMAP_TOP);
	pr_info("  * 0x%08lx..0x%08lx  : vmalloc & ioremap\n",
		VMALLOC_START, VMALLOC_END);
#endif /* CONFIG_PPC32 */
}

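/* Release the memory occupied by the kernel's __init sections. */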
void free_initmem(void)
{
	ppc_md.progress = ppc_printk_progress;
	free_initmem_default(POISON_FREE_INITMEM);
}

#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif

/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &page->flags))
		clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);

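/*
 * Write the page's data back from the d-cache and invalidate the
 * i-cache so freshly written instructions become visible.
 */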
void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (PageCompound(page)) {
		flush_dcache_icache_hugepage(page);
		return;
	}
#endif
#ifdef CONFIG_BOOKE
	{
		void *start = kmap_atomic(page);
		__flush_dcache_icache(start);
		kunmap_atomic(start);
	}
#elif defined(CONFIG_8xx) || defined(CONFIG_PPC64)
	/* On 8xx there is no need to kmap since highmem is not supported */
	__flush_dcache_icache(page_address(page));
#else
	__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
#endif
}
EXPORT_SYMBOL(flush_dcache_icache_page);

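/* Zero a page that is about to be mapped into user space. */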
void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	clear_page(page);

	/*
	 * We shouldn't have to do this, but some versions of glibc
	 * require it (ld.so assumes zero filled pages are icache clean)
	 *   - Anton
	 */
	flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);

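/* Copy a page that is about to be mapped into user space. */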
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	copy_page(vto, vfrom);

	/*
	 * We should be able to use the following optimisation, however
	 * there are two problems.
	 * Firstly a bug in some versions of binutils meant PLT sections
	 * were not marked executable.
	 * Secondly the first word in the GOT section is blrl, used
	 * to establish the GOT address.  Until recently the GOT was
	 * not marked executable.
	 * - Anton
	 */
#if 0
	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
		return;
#endif

	flush_dcache_page(pg);
}

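/*
 * Make instruction modifications in the given user page visible to
 * the icache; kmap() is used because the page may be in highmem.
 */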
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	unsigned long maddr;

	maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
	flush_icache_range(maddr, maddr + len);
	kunmap(page);
}
EXPORT_SYMBOL(flush_icache_user_range);

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *ptep)
{
#ifdef CONFIG_PPC_STD_MMU
	/*
	 * We don't need to worry about _PAGE_PRESENT here because we are
	 * called with either mm->page_table_lock held or ptl lock held
	 */
	unsigned long access = 0, trap;

	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
	if (!pte_young(*ptep) || address >= TASK_SIZE)
		return;

	/* We try to figure out if we are coming from an instruction
	 * access fault and pass that down to __hash_page so we avoid
	 * double-faulting on execution of fresh text. We have to test
	 * for regs NULL since init will get here first thing at boot
	 *
	 * We also avoid filling the hash if not coming from a fault
	 */
	if (current->thread.regs == NULL)
		return;
	trap = TRAP(current->thread.regs);
	if (trap == 0x400)
		access |= _PAGE_EXEC;
	else if (trap != 0x300)
		return;
	hash_preload(vma->vm_mm, address, access, trap);
#endif /* CONFIG_PPC_STD_MMU */
#if (defined(CONFIG_PPC_BOOK3E_64) || defined(CONFIG_PPC_FSL_BOOK3E)) \
	&& defined(CONFIG_HUGETLB_PAGE)
	if (is_vm_hugetlb_page(vma))
		book3e_hugetlb_preload(vma, address, *ptep);
#endif
}

/*
 * System memory should not be in /proc/iomem but various tools expect it
 * (eg kdump).
 */
static int __init add_system_ram_resources(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		struct resource *res;
		unsigned long base = reg->base;
		unsigned long size = reg->size;

		res = kzalloc(sizeof(struct resource), GFP_KERNEL);
		WARN_ON(!res);

		if (res) {
			res->name = "System RAM";
			res->start = base;
			res->end = base + size - 1;
			res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
			WARN_ON(request_resource(&iomem_resource, res) < 0);
		}
	}

	return 0;
}
subsys_initcall(add_system_ram_resources);

#ifdef CONFIG_STRICT_DEVMEM
/*
 * devmem_is_allowed(): check to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * Access has to be given to non-kernel-ram areas as well, these contain the
 * PCI mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (iomem_is_exclusive(pfn << PAGE_SHIFT))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	if (page_is_rtas_user_buf(pfn))
		return 1;
	return 0;
}
#endif /* CONFIG_STRICT_DEVMEM */