/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 *  Derived from "arch/i386/mm/init.c":
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/suspend.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/sparsemem.h>
#include <asm/vdso.h>
#include <asm/fixmap.h>
#include <asm/swiotlb.h>
#include <asm/rtas.h>

#include "mmu_decl.h"

#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE 0       /* XXX for now */
#define CPU_FTR_NOEXECUTE       0
#endif

unsigned long long memory_limit;
bool init_mem_is_free;

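/*
 * With CONFIG_HIGHMEM, kmap_pte/kmap_prot back the atomic kmap slots in
 * the fixmap region; TOP_ZONE names the highest memory zone this
 * configuration populates.
 */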
#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
EXPORT_SYMBOL(kmap_pte);
pgprot_t kmap_prot;
EXPORT_SYMBOL(kmap_prot);
#define TOP_ZONE ZONE_HIGHMEM

static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
        return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
                        vaddr), vaddr), vaddr);
}
#else
#define TOP_ZONE ZONE_NORMAL
#endif

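/*
 * page_is_ram(): true if the given pfn lies in real memory.  On 32-bit a
 * comparison against max_pfn suffices; on 64-bit the memblock layout must
 * be consulted, since memory may contain holes.
 */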
int page_is_ram(unsigned long pfn)
{
#ifndef CONFIG_PPC64    /* XXX for now */
        return pfn < max_pfn;
#else
        unsigned long paddr = (pfn << PAGE_SHIFT);
        struct memblock_region *reg;

        for_each_memblock(memory, reg)
                if (paddr >= reg->base && paddr < (reg->base + reg->size))
                        return 1;
        return 0;
#endif
}

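/*
 * Decide the page protection used when user space maps physical memory
 * (e.g. via /dev/mem): defer to the platform hook if one is registered,
 * otherwise map anything that is not RAM as uncached.
 */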
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                              unsigned long size, pgprot_t vma_prot)
{
        if (ppc_md.phys_mem_access_prot)
                return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

        if (!page_is_ram(pfn))
                vma_prot = pgprot_noncached(vma_prot);

        return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
        return hot_add_scn_to_nid(start);
}
#endif

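/*
 * Weak default stubs: platforms that support memory hotplug override
 * these to create/tear down the linear mapping for a hot-added section.
 */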
int __weak create_section_mapping(unsigned long start, unsigned long end)
{
        return -ENODEV;
}

int __weak remove_section_mapping(unsigned long start, unsigned long end)
{
        return -ENODEV;
}

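/*
 * Arch hook for memory hot-add: map the new range into the kernel linear
 * mapping, then hand the page range to the core mm via __add_pages().
 */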
int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
{
        struct pglist_data *pgdata;
        struct zone *zone;
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
        int rc;

        pgdata = NODE_DATA(nid);

        start = (unsigned long)__va(start);
        rc = create_section_mapping(start, start + size);
        if (rc) {
                pr_warning(
                        "Unable to create mapping for hot added memory 0x%llx..0x%llx: %d\n",
                        start, start + size, rc);
                return -EFAULT;
        }

        /* this should work for most non-highmem platforms */
        zone = pgdata->node_zones +
                zone_for_memory(nid, start, size, 0, for_device);

        return __add_pages(nid, zone, start_pfn, nr_pages);
}

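/*
 * Arch hook for memory hot-remove: the inverse of arch_add_memory().
 * Release the pages from the core mm, then tear down the section mapping.
 */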
#ifdef CONFIG_MEMORY_HOTREMOVE
int arch_remove_memory(u64 start, u64 size)
{
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
        struct zone *zone;
        int ret;

        zone = page_zone(pfn_to_page(start_pfn));
        ret = __remove_pages(zone, start_pfn, nr_pages);
        if (ret)
                return ret;

        /* Remove htab bolted mappings for this section of memory */
        start = (unsigned long)__va(start);
        ret = remove_section_mapping(start, start + size);

        /* Ensure all vmalloc mappings are flushed in case they also
         * hit that section of memory
         */
        vm_unmap_aliases();

        return ret;
}
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */

/*
 * walk_system_ram_range() needs to make sure there are no holes in a given
 * memory range.  PPC64 does not maintain the memory layout in /proc/iomem.
 * Instead it maintains it in memblock.memory structures.  Walk through the
 * memory regions, find holes and call back for each contiguous region.
 */
int
walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
                void *arg, int (*func)(unsigned long, unsigned long, void *))
{
        struct memblock_region *reg;
        unsigned long end_pfn = start_pfn + nr_pages;
        unsigned long tstart, tend;
        int ret = -1;

        for_each_memblock(memory, reg) {
                tstart = max(start_pfn, memblock_region_memory_base_pfn(reg));
                tend = min(end_pfn, memblock_region_memory_end_pfn(reg));
                if (tstart >= tend)
                        continue;
                ret = (*func)(tstart, tend - tstart, arg);
                if (ret)
                        break;
        }
        return ret;
}
EXPORT_SYMBOL_GPL(walk_system_ram_range);

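/*
 * Flat (non-NUMA) initialisation: all memory lives in node 0 and the pfn
 * limits are derived directly from the memblock-reported DRAM end.
 */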
#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init initmem_init(void)
{
        max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
        min_low_pfn = MEMORY_START >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
        max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif

        /* Place all memblock_regions in the same node and merge contiguous
         * memblock_regions
         */
        memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);

        /* XXX need to clip this if using highmem? */
        sparse_memory_present_with_active_regions(0);
        sparse_init();
}

/* mark pages that don't exist as nosave */
static int __init mark_nonram_nosave(void)
{
        struct memblock_region *reg, *prev = NULL;

        for_each_memblock(memory, reg) {
                if (prev &&
                    memblock_region_memory_end_pfn(prev) < memblock_region_memory_base_pfn(reg))
                        register_nosave_region(memblock_region_memory_end_pfn(prev),
                                               memblock_region_memory_base_pfn(reg));
                prev = reg;
        }
        return 0;
}
#else /* CONFIG_NEED_MULTIPLE_NODES */
static int __init mark_nonram_nosave(void)
{
        return 0;
}
#endif

static bool zone_limits_final;

/*
 * The memory zones past TOP_ZONE are managed by generic mm code.
 * These should be set to zero since that's what every other
 * architecture does.
 */
static unsigned long max_zone_pfns[MAX_NR_ZONES] = {
        [0            ... TOP_ZONE        ] = ~0UL,
        [TOP_ZONE + 1 ... MAX_NR_ZONES - 1] = 0
};

/*
 * Restrict the specified zone and all more restrictive zones
 * to be below the specified pfn.  May not be called after
 * paging_init().
 */
void __init limit_zone_pfn(enum zone_type zone, unsigned long pfn_limit)
{
        int i;

        if (WARN_ON(zone_limits_final))
                return;

        for (i = zone; i >= 0; i--) {
                if (max_zone_pfns[i] > pfn_limit)
                        max_zone_pfns[i] = pfn_limit;
        }
}

/*
 * Find the least restrictive zone that is entirely below the
 * specified pfn limit.  Returns < 0 if no suitable zone is found.
 *
 * pfn_limit must be u64 because it can exceed 32 bits even on 32-bit
 * systems -- the DMA limit can be higher than any possible real pfn.
 */
int dma_pfn_limit_to_zone(u64 pfn_limit)
{
        int i;

        for (i = TOP_ZONE; i >= 0; i--) {
                if (max_zone_pfns[i] <= pfn_limit)
                        return i;
        }

        return -EPERM;
}

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
        unsigned long long total_ram = memblock_phys_mem_size();
        phys_addr_t top_of_ram = memblock_end_of_DRAM();

#ifdef CONFIG_PPC32
        unsigned long v = __fix_to_virt(__end_of_fixed_addresses - 1);
        unsigned long end = __fix_to_virt(FIX_HOLE);

        for (; v < end; v += PAGE_SIZE)
                map_page(v, 0, 0); /* XXX gross */
#endif

#ifdef CONFIG_HIGHMEM
        map_page(PKMAP_BASE, 0, 0);     /* XXX gross */
        pkmap_page_table = virt_to_kpte(PKMAP_BASE);

        kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
        kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */

        printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n",
               (unsigned long long)top_of_ram, total_ram);
        printk(KERN_DEBUG "Memory hole size: %ldMB\n",
               (long int)((top_of_ram - total_ram) >> 20));

#ifdef CONFIG_HIGHMEM
        limit_zone_pfn(ZONE_NORMAL, lowmem_end_addr >> PAGE_SHIFT);
#endif
        limit_zone_pfn(TOP_ZONE, top_of_ram >> PAGE_SHIFT);
        zone_limits_final = true;
        free_area_init_nodes(max_zone_pfns);

        mark_nonram_nosave();
}

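/*
 * Late boot memory setup: release bootmem to the buddy allocator, free
 * any highmem pages, and report the kernel's virtual memory layout.
 */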
void __init mem_init(void)
{
        /*
         * book3s is limited to 16 page sizes due to encoding this in
         * a 4-bit field for slices.
         */
        BUILD_BUG_ON(MMU_PAGE_COUNT > 16);

#ifdef CONFIG_SWIOTLB
        swiotlb_init(0);
#endif

        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
        set_max_mapnr(max_pfn);
        free_all_bootmem();

#ifdef CONFIG_HIGHMEM
        {
                unsigned long pfn, highmem_mapnr;

                highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
                for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
                        phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT;
                        struct page *page = pfn_to_page(pfn);
                        if (!memblock_is_reserved(paddr))
                                free_highmem_page(page);
                }
        }
#endif /* CONFIG_HIGHMEM */

#if defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_SMP)
        /*
         * If smp is enabled, next_tlbcam_idx is initialized in the cpu up
         * functions.... do it here for the non-smp case.
         */
        per_cpu(next_tlbcam_idx, smp_processor_id()) =
                (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif

        mem_init_print_info(NULL);
#ifdef CONFIG_PPC32
        pr_info("Kernel virtual memory layout:\n");
        pr_info("  * 0x%08lx..0x%08lx  : fixmap\n", FIXADDR_START, FIXADDR_TOP);
#ifdef CONFIG_HIGHMEM
        pr_info("  * 0x%08lx..0x%08lx  : highmem PTEs\n",
                PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
#endif /* CONFIG_HIGHMEM */
#ifdef CONFIG_NOT_COHERENT_CACHE
        pr_info("  * 0x%08lx..0x%08lx  : consistent mem\n",
                IOREMAP_TOP, IOREMAP_TOP + CONFIG_CONSISTENT_SIZE);
#endif /* CONFIG_NOT_COHERENT_CACHE */
        pr_info("  * 0x%08lx..0x%08lx  : early ioremap\n",
                ioremap_bot, IOREMAP_TOP);
        pr_info("  * 0x%08lx..0x%08lx  : vmalloc & ioremap\n",
                VMALLOC_START, VMALLOC_END);
#endif /* CONFIG_PPC32 */
}

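/*
 * Free the memory occupied by __init code/data once boot is complete.
 * ppc_md.progress is redirected first, since the platform's early
 * progress hook may itself live in init text.
 */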
void free_initmem(void)
{
        ppc_md.progress = ppc_printk_progress;
        init_mem_is_free = true;
        free_initmem_default(POISON_FREE_INITMEM);
}

#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
        free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif

/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
        if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
                return;
        /* avoid an atomic op if possible */
        if (test_bit(PG_arch_1, &page->flags))
                clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);

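/*
 * Write the dcache back and invalidate the icache for a page, picking
 * the cheapest way to get a kernel mapping on this configuration.
 */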
void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_HUGETLB_PAGE
        if (PageCompound(page)) {
                flush_dcache_icache_hugepage(page);
                return;
        }
#endif
#if defined(CONFIG_8xx) || defined(CONFIG_PPC64)
        /* On 8xx there is no need to kmap since highmem is not supported */
        __flush_dcache_icache(page_address(page));
#else
        if (IS_ENABLED(CONFIG_BOOKE) || sizeof(phys_addr_t) > sizeof(void *)) {
                void *start = kmap_atomic(page);
                __flush_dcache_icache(start);
                kunmap_atomic(start);
        } else {
                __flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
        }
#endif
}
EXPORT_SYMBOL(flush_dcache_icache_page);

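/*
 * Zero a page destined for user space, then mark it as not icache clean
 * so the icache gets flushed if the page is ever executed from.
 */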
void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
        clear_page(page);

        /*
         * We shouldn't have to do this, but some versions of glibc
         * require it (ld.so assumes zero filled pages are icache clean)
         * - Anton
         */
        flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);

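/*
 * Copy a user page; as in clear_user_page(), the result must be treated
 * as not icache clean.  Note the long-disabled optimisation below.
 */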
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
                    struct page *pg)
{
        copy_page(vto, vfrom);

        /*
         * We should be able to use the following optimisation, however
         * there are two problems.
         * Firstly a bug in some versions of binutils meant PLT sections
         * were not marked executable.
         * Secondly the first word in the GOT section is blrl, used
         * to establish the GOT address. Until recently the GOT was
         * not marked executable.
         * - Anton
         */
#if 0
        if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
                return;
#endif

        flush_dcache_page(pg);
}

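/*
 * Flush the icache for the part of a page the kernel just wrote on
 * behalf of user space (e.g. when ptrace inserts a breakpoint).
 */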
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
                             unsigned long addr, int len)
{
        unsigned long maddr;

        maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
        flush_icache_range(maddr, maddr + len);
        kunmap(page);
}
EXPORT_SYMBOL(flush_icache_user_range);

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
                      pte_t *ptep)
{
#ifdef CONFIG_PPC_STD_MMU
        /*
         * We don't need to worry about _PAGE_PRESENT here because we are
         * called with either mm->page_table_lock held or ptl lock held
         */
        unsigned long access, trap;

        if (radix_enabled())
                return;

        /* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
        if (!pte_young(*ptep) || address >= TASK_SIZE)
                return;

        /* We try to figure out if we are coming from an instruction
         * access fault and pass that down to __hash_page so we avoid
         * double-faulting on execution of fresh text. We have to test
         * for regs NULL since init will get here first thing at boot
         *
         * We also avoid filling the hash if not coming from a fault
         */

        trap = current->thread.regs ? TRAP(current->thread.regs) : 0UL;
        switch (trap) {
        case 0x300:
                access = 0UL;
                break;
        case 0x400:
                access = _PAGE_EXEC;
                break;
        default:
                return;
        }

        hash_preload(vma->vm_mm, address, access, trap);
#endif /* CONFIG_PPC_STD_MMU */
#if (defined(CONFIG_PPC_BOOK3E_64) || defined(CONFIG_PPC_FSL_BOOK3E)) \
        && defined(CONFIG_HUGETLB_PAGE)
        if (is_vm_hugetlb_page(vma))
                book3e_hugetlb_preload(vma, address, *ptep);
#endif
}

/*
 * System memory should not be in /proc/iomem but various tools expect it
 * (eg kdump).
 */
static int __init add_system_ram_resources(void)
{
        struct memblock_region *reg;

        for_each_memblock(memory, reg) {
                struct resource *res;
                unsigned long base = reg->base;
                unsigned long size = reg->size;

                res = kzalloc(sizeof(struct resource), GFP_KERNEL);
                WARN_ON(!res);

                if (res) {
                        res->name = "System RAM";
                        res->start = base;
                        res->end = base + size - 1;
                        res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
                        WARN_ON(request_resource(&iomem_resource, res) < 0);
                }
        }

        return 0;
}
subsys_initcall(add_system_ram_resources);

#ifdef CONFIG_STRICT_DEVMEM
/*
 * devmem_is_allowed(): check to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * Access has to be given to non-kernel-ram areas as well, these contain the
 * PCI mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pfn)
{
        if (page_is_rtas_user_buf(pfn))
                return 1;
        if (iomem_is_exclusive(PFN_PHYS(pfn)))
                return 0;
        if (!page_is_ram(pfn))
                return 1;
        return 0;
}
#endif /* CONFIG_STRICT_DEVMEM */