/*
 *  Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *  Copyright (C) 1996 Paul Mackerras
 *  PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/suspend.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/sparsemem.h>
#include <asm/vdso.h>
#include <asm/fixmap.h>
#include <asm/swiotlb.h>
#include <asm/rtas.h>

#include "mmu_decl.h"
#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE	0	/* XXX for now */
#define CPU_FTR_NOEXECUTE	0
#endif
unsigned long long memory_limit;

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
EXPORT_SYMBOL(kmap_pte);
pgprot_t kmap_prot;
EXPORT_SYMBOL(kmap_prot);
#define TOP_ZONE ZONE_HIGHMEM
static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
			vaddr), vaddr), vaddr);
}
#else
#define TOP_ZONE ZONE_NORMAL
#endif
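/*
 * Illustration: virt_to_kpte() is shorthand for the full
 * pgd -> pud -> pmd -> pte walk of the kernel page tables; e.g.
 * virt_to_kpte(PKMAP_BASE) in paging_init() below resolves the PTE
 * backing the first pkmap slot.
 */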
int page_is_ram(unsigned long pfn)
{
#ifndef CONFIG_PPC64	/* XXX for now */
	return pfn < max_pfn;
#else
	unsigned long paddr = (pfn << PAGE_SHIFT);
	struct memblock_region *reg;

	for_each_memblock(memory, reg)
		if (paddr >= reg->base && paddr < (reg->base + reg->size))
			return 1;
	return 0;
#endif
}
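/*
 * Worked example: with 4KB pages (PAGE_SHIFT == 12), pfn 0x10000
 * corresponds to paddr 0x10000000 (256MB), so a memblock region with
 * base 0 and size 0x20000000 makes page_is_ram(0x10000) return 1.
 */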
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

	if (!page_is_ram(pfn))
		vma_prot = pgprot_noncached(vma_prot);

	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
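/*
 * Usage sketch (illustrative): the /dev/mem mmap path consults this
 * hook roughly as
 *
 *	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
 *						 vma->vm_end - vma->vm_start,
 *						 vma->vm_page_prot);
 *
 * so mappings of non-RAM (e.g. MMIO) come back uncached.
 */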
#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
	return hot_add_scn_to_nid(start);
}
#endif
int __weak create_section_mapping(unsigned long start, unsigned long end)
{
	return -ENODEV;
}

int __weak remove_section_mapping(unsigned long start, unsigned long end)
{
	return -ENODEV;
}
int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
{
	struct pglist_data *pgdata;
	struct zone *zone;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int rc;

	pgdata = NODE_DATA(nid);

	start = (unsigned long)__va(start);
	rc = create_section_mapping(start, start + size);
	if (rc) {
		pr_warning(
			"Unable to create mapping for hot added memory 0x%llx..0x%llx: %d\n",
			start, start + size, rc);
		return -EFAULT;
	}

	/* this should work for most non-highmem platforms */
	zone = pgdata->node_zones +
		zone_for_memory(nid, start, size, 0, for_device);

	return __add_pages(nid, zone, start_pfn, nr_pages);
}
#ifdef CONFIG_MEMORY_HOTREMOVE
int arch_remove_memory(u64 start, u64 size)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct zone *zone;
	int ret;

	zone = page_zone(pfn_to_page(start_pfn));
	ret = __remove_pages(zone, start_pfn, nr_pages);
	if (ret)
		return ret;

	/* Remove htab bolted mappings for this section of memory */
	start = (unsigned long)__va(start);
	ret = remove_section_mapping(start, start + size);

	/* Ensure all vmalloc mappings are flushed in case they also
	 * hit that section of memory
	 */
	vm_unmap_aliases();

	return ret;
}
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */
/*
 * walk_system_ram_range() needs to make sure there are no holes in a given
 * memory range.  PPC64 does not maintain the memory layout in /proc/iomem.
 * Instead it maintains it in memblock.memory structures.  Walk through the
 * memory regions, find holes and callback for contiguous regions.
 */
int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
		void *arg, int (*func)(unsigned long, unsigned long, void *))
{
	struct memblock_region *reg;
	unsigned long end_pfn = start_pfn + nr_pages;
	unsigned long tstart, tend;
	int ret = -1;

	for_each_memblock(memory, reg) {
		tstart = max(start_pfn, memblock_region_memory_base_pfn(reg));
		tend = min(end_pfn, memblock_region_memory_end_pfn(reg));
		if (tstart >= tend)
			continue;
		ret = (*func)(tstart, tend - tstart, arg);
		if (ret)
			break;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(walk_system_ram_range);
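/*
 * Example callback (hypothetical): count the usable RAM pages in a
 * range; returning 0 keeps the walk going, non-zero stops it:
 *
 *	static int count_ram(unsigned long start_pfn, unsigned long nr_pages,
 *			     void *arg)
 *	{
 *		*(unsigned long *)arg += nr_pages;
 *		return 0;
 *	}
 *
 *	unsigned long total = 0;
 *	walk_system_ram_range(start_pfn, nr_pages, &total, count_ram);
 */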
#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init initmem_init(void)
{
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = MEMORY_START >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif

	/* Place all memblock_regions in the same node and merge contiguous
	 * memblock_regions
	 */
	memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);

	/* XXX need to clip this if using highmem? */
	sparse_memory_present_with_active_regions(0);
	sparse_init();
}
/* mark pages that don't exist as nosave */
static int __init mark_nonram_nosave(void)
{
	struct memblock_region *reg, *prev = NULL;

	for_each_memblock(memory, reg) {
		if (prev &&
		    memblock_region_memory_end_pfn(prev) < memblock_region_memory_base_pfn(reg))
			register_nosave_region(memblock_region_memory_end_pfn(prev),
					       memblock_region_memory_base_pfn(reg));
		prev = reg;
	}
	return 0;
}
#else /* CONFIG_NEED_MULTIPLE_NODES */
static int __init mark_nonram_nosave(void)
{
	return 0;
}
#endif
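/*
 * Illustration for mark_nonram_nosave() above: with RAM at [0, 1GB)
 * and [2GB, 3GB), the pfns covering the [1GB, 2GB) hole are registered
 * as a nosave region so hibernation does not try to save pages that
 * do not exist.
 */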
static bool zone_limits_final;

/*
 * The memory zones past TOP_ZONE are managed by generic mm code.
 * These should be set to zero since that's what every other
 * architecture does.
 */
static unsigned long max_zone_pfns[MAX_NR_ZONES] = {
	[0            ... TOP_ZONE        ] = ~0UL,
	[TOP_ZONE + 1 ... MAX_NR_ZONES - 1] = 0
};
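/*
 * Illustrative expansion (assuming TOP_ZONE == 1 and MAX_NR_ZONES == 3,
 * which depends on the zone config): the range initializers above are
 * then equivalent to { ~0UL, ~0UL, 0 } -- every zone up to TOP_ZONE
 * starts unbounded and is clipped later by limit_zone_pfn().
 */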
/*
 * Restrict the specified zone and all more restrictive zones
 * to be below the specified pfn.  May not be called after
 * paging_init().
 */
void __init limit_zone_pfn(enum zone_type zone, unsigned long pfn_limit)
{
	int i;

	if (WARN_ON(zone_limits_final))
		return;

	for (i = zone; i >= 0; i--) {
		if (max_zone_pfns[i] > pfn_limit)
			max_zone_pfns[i] = pfn_limit;
	}
}
/*
 * Find the least restrictive zone that is entirely below the
 * specified pfn limit.  Returns < 0 if no suitable zone is found.
 *
 * pfn_limit must be u64 because it can exceed 32 bits even on 32-bit
 * systems -- the DMA limit can be higher than any possible real pfn.
 */
int dma_pfn_limit_to_zone(u64 pfn_limit)
{
	int i;

	for (i = TOP_ZONE; i >= 0; i--) {
		if (max_zone_pfns[i] <= pfn_limit)
			return i;
	}

	return -EPERM;
}
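/*
 * Worked example: a device limited to 31-bit DMA has
 * pfn_limit = (1ULL << 31) >> PAGE_SHIFT = 0x80000 with 4KB pages;
 * the loop then picks the least restrictive zone whose pfn ceiling is
 * at or below 0x80000, or returns -EPERM if none qualifies.
 */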
/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long long total_ram = memblock_phys_mem_size();
	phys_addr_t top_of_ram = memblock_end_of_DRAM();

#ifdef CONFIG_PPC32
	unsigned long v = __fix_to_virt(__end_of_fixed_addresses - 1);
	unsigned long end = __fix_to_virt(FIX_HOLE);

	for (; v < end; v += PAGE_SIZE)
		map_page(v, 0, 0); /* XXX gross */
#endif

#ifdef CONFIG_HIGHMEM
	map_page(PKMAP_BASE, 0, 0);	/* XXX gross */
	pkmap_page_table = virt_to_kpte(PKMAP_BASE);

	kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
	kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */

	printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n",
	       (unsigned long long)top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (long int)((top_of_ram - total_ram) >> 20));

#ifdef CONFIG_HIGHMEM
	limit_zone_pfn(ZONE_NORMAL, lowmem_end_addr >> PAGE_SHIFT);
#endif
	limit_zone_pfn(TOP_ZONE, top_of_ram >> PAGE_SHIFT);
	zone_limits_final = true;
	free_area_init_nodes(max_zone_pfns);

	mark_nonram_nosave();
}
void __init mem_init(void)
{
	/*
	 * book3s is limited to 16 page sizes due to encoding this in
	 * a 4-bit field for slices.
	 */
	BUILD_BUG_ON(MMU_PAGE_COUNT > 16);

#ifdef CONFIG_SWIOTLB
	swiotlb_init(0);
#endif

	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
	set_max_mapnr(max_pfn);
	free_all_bootmem();

#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn, highmem_mapnr;

		highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT;
			struct page *page = pfn_to_page(pfn);
			if (!memblock_is_reserved(paddr))
				free_highmem_page(page);
		}
	}
#endif /* CONFIG_HIGHMEM */

#if defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_SMP)
	/*
	 * If smp is enabled, next_tlbcam_idx is initialized in the cpu up
	 * functions.... do it here for the non-smp case.
	 */
	per_cpu(next_tlbcam_idx, smp_processor_id()) =
		(mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif

	mem_init_print_info(NULL);
379 pr_info("Kernel virtual memory layout:\n");
380 pr_info(" * 0x%08lx..0x%08lx : fixmap\n", FIXADDR_START
, FIXADDR_TOP
);
381 #ifdef CONFIG_HIGHMEM
382 pr_info(" * 0x%08lx..0x%08lx : highmem PTEs\n",
383 PKMAP_BASE
, PKMAP_ADDR(LAST_PKMAP
));
384 #endif /* CONFIG_HIGHMEM */
385 #ifdef CONFIG_NOT_COHERENT_CACHE
386 pr_info(" * 0x%08lx..0x%08lx : consistent mem\n",
387 IOREMAP_TOP
, IOREMAP_TOP
+ CONFIG_CONSISTENT_SIZE
);
388 #endif /* CONFIG_NOT_COHERENT_CACHE */
389 pr_info(" * 0x%08lx..0x%08lx : early ioremap\n",
390 ioremap_bot
, IOREMAP_TOP
);
391 pr_info(" * 0x%08lx..0x%08lx : vmalloc & ioremap\n",
392 VMALLOC_START
, VMALLOC_END
);
393 #endif /* CONFIG_PPC32 */
void free_initmem(void)
{
	ppc_md.progress = ppc_printk_progress;
	free_initmem_default(POISON_FREE_INITMEM);
}
#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif
/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &page->flags))
		clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);
void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (PageCompound(page)) {
		flush_dcache_icache_hugepage(page);
		return;
	}
#endif
#if defined(CONFIG_8xx) || defined(CONFIG_PPC64)
	/* On 8xx there is no need to kmap since highmem is not supported */
	__flush_dcache_icache(page_address(page));
#else
	if (IS_ENABLED(CONFIG_BOOKE) || sizeof(phys_addr_t) > sizeof(void *)) {
		void *start = kmap_atomic(page);
		__flush_dcache_icache(start);
		kunmap_atomic(start);
	} else {
		__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
	}
#endif
}
EXPORT_SYMBOL(flush_dcache_icache_page);
void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	clear_page(page);

	/*
	 * We shouldn't have to do this, but some versions of glibc
	 * require it (ld.so assumes zero filled pages are icache clean)
	 */
	flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	copy_page(vto, vfrom);

	/*
	 * We should be able to use the following optimisation, however
	 * there are two problems.
	 * Firstly a bug in some versions of binutils meant PLT sections
	 * were not marked executable.
	 * Secondly the first word in the GOT section is blrl, used
	 * to establish the GOT address. Until recently the GOT was
	 * not marked executable.
	 */
#if 0
	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
		return;
#endif

	flush_dcache_page(pg);
}
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	unsigned long maddr;

	maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
	flush_icache_range(maddr, maddr + len);
	kunmap(page);
}
EXPORT_SYMBOL(flush_icache_user_range);
/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *ptep)
{
#ifdef CONFIG_PPC_STD_MMU
	/*
	 * We don't need to worry about _PAGE_PRESENT here because we are
	 * called with either mm->page_table_lock held or ptl lock held
	 */
	unsigned long access, trap;

	if (radix_enabled())
		return;

	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
	if (!pte_young(*ptep) || address >= TASK_SIZE)
		return;

	/* We try to figure out if we are coming from an instruction
	 * access fault and pass that down to __hash_page so we avoid
	 * double-faulting on execution of fresh text. We have to test
	 * for regs NULL since init will get here first thing at boot
	 *
	 * We also avoid filling the hash if not coming from a fault
	 */
	trap = current->thread.regs ? TRAP(current->thread.regs) : 0UL;
	switch (trap) {
	case 0x300:
		access = 0UL;
		break;
	case 0x400:
		access = _PAGE_EXEC;
		break;
	default:
		return;
	}

	hash_preload(vma->vm_mm, address, access, trap);
#endif /* CONFIG_PPC_STD_MMU */
#if (defined(CONFIG_PPC_BOOK3E_64) || defined(CONFIG_PPC_FSL_BOOK3E)) \
	&& defined(CONFIG_HUGETLB_PAGE)
	if (is_vm_hugetlb_page(vma))
		book3e_hugetlb_preload(vma, address, *ptep);
#endif
}
/*
 * System memory should not be in /proc/iomem but various tools expect it
 * (eg kdump).
 */
static int __init add_system_ram_resources(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		struct resource *res;
		unsigned long base = reg->base;
		unsigned long size = reg->size;

		res = kzalloc(sizeof(struct resource), GFP_KERNEL);
		WARN_ON(!res);

		if (res) {
			res->name = "System RAM";
			res->start = base;
			res->end = base + size - 1;
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
			WARN_ON(request_resource(&iomem_resource, res) < 0);
		}
	}

	return 0;
}
subsys_initcall(add_system_ram_resources);
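/*
 * Illustrative effect: each memblock region then shows up in
 * /proc/iomem as a line of the form
 *
 *	00000000-7fffffff : System RAM
 *
 * which tools such as kdump parse to locate RAM.
 */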
#ifdef CONFIG_STRICT_DEVMEM
/*
 * devmem_is_allowed(): check to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * Access has to be given to non-kernel-ram areas as well, these contain the
 * PCI mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (page_is_rtas_user_buf(pfn))
		return 1;
	if (iomem_is_exclusive(PFN_PHYS(pfn)))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	return 0;
}
#endif /* CONFIG_STRICT_DEVMEM */