// SPDX-License-Identifier: GPL-2.0
/*
 * Initialize MMU support.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>

#include <linux/bootmem.h>
#include <linux/efi.h>
#include <linux/elf.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/bitops.h>
#include <linux/kexec.h>

#include <asm/dma.h>
#include <asm/io.h>
#include <asm/machvec.h>
#include <asm/numa.h>
#include <asm/patch.h>
#include <asm/pgalloc.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/tlb.h>

#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/mca.h>
extern void ia64_tlb_init (void);

unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;
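/*
 * MAX_DMA_ADDRESS above is the virtual address of the 4 GB physical
 * boundary (PAGE_OFFSET + 2^32), i.e. the upper bound used when sizing
 * the DMA zone for devices limited to 32-bit bus addresses.
 */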

#ifdef CONFIG_VIRTUAL_MEM_MAP
unsigned long VMALLOC_END = VMALLOC_END_INIT;
EXPORT_SYMBOL(VMALLOC_END);
struct page *vmem_map;
EXPORT_SYMBOL(vmem_map);
#endif

struct page *zero_page_memmap_ptr;	/* map entry for zero page */
EXPORT_SYMBOL(zero_page_memmap_ptr);

void
__ia64_sync_icache_dcache (pte_t pte)
{
	unsigned long addr;
	struct page *page;

	page = pte_page(pte);
	addr = (unsigned long) page_address(page);

	if (test_bit(PG_arch_1, &page->flags))
		return;				/* i-cache is already coherent with d-cache */

	flush_icache_range(addr, addr + (PAGE_SIZE << compound_order(page)));
	set_bit(PG_arch_1, &page->flags);	/* mark page as clean */
}
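
/*
 * __ia64_sync_icache_dcache() above is invoked from set_pte() when a
 * present, executable user mapping is installed for a page whose i-cache
 * state is unknown; PG_arch_1 records that the page is already coherent,
 * so the flush is done at most once per page.
 */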

/*
 * Since DMA is i-cache coherent, any (complete) pages that were written via
 * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
 * flush them when they get mapped into an executable vm-area.
 */
void
dma_mark_clean(void *addr, size_t size)
{
	unsigned long pg_addr, end;

	pg_addr = PAGE_ALIGN((unsigned long) addr);
	end = (unsigned long) addr + size;
	while (pg_addr + PAGE_SIZE <= end) {
		struct page *page = virt_to_page(pg_addr);
		set_bit(PG_arch_1, &page->flags);
		pg_addr += PAGE_SIZE;
	}
}
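
/*
 * Only pages completely covered by the DMA transfer are marked clean:
 * PAGE_ALIGN() rounds the start up and the loop stops before a partial
 * page at the end, so partially written pages still get the lazy
 * i-cache flush when they are mapped executable.
 */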

inline void
ia64_set_rbs_bot (void)
{
	unsigned long stack_size = rlimit_max(RLIMIT_STACK) & -16;

	if (stack_size > MAX_USER_STACK_SIZE)
		stack_size = MAX_USER_STACK_SIZE;
	current->thread.rbs_bot = PAGE_ALIGN(current->mm->start_stack - stack_size);
}
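
/*
 * Worked example (hypothetical numbers): with RLIMIT_STACK capped at
 * 8 MB, rbs_bot ends up page-aligned 8 MB below start_stack, so the
 * register backing store grows upward from there while the memory
 * stack grows downward from the top of the stack area.
 */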

/*
 * This performs some platform-dependent address space initialization.
 * On IA-64, we want to setup the VM area for the register backing
 * store (which grows upwards) and install the gateway page which is
 * used for signal trampolines, etc.
 */
void
ia64_init_addr_space (void)
{
	struct vm_area_struct *vma;

	ia64_set_rbs_bot();

	/*
	 * If we're out of memory and kmem_cache_alloc() returns NULL, we simply ignore
	 * the problem.  When the process attempts to write to the register backing store
	 * for the first time, it will get a SEGFAULT in this case.
	 */
	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (vma) {
		INIT_LIST_HEAD(&vma->anon_vma_chain);
		vma->vm_mm = current->mm;
		vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
		vma->vm_end = vma->vm_start + PAGE_SIZE;
		vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
		down_write(&current->mm->mmap_sem);
		if (insert_vm_struct(current->mm, vma)) {
			up_write(&current->mm->mmap_sem);
			kmem_cache_free(vm_area_cachep, vma);
			return;
		}
		up_write(&current->mm->mmap_sem);
	}

	/* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
	if (!(current->personality & MMAP_PAGE_ZERO)) {
		vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
		if (vma) {
			INIT_LIST_HEAD(&vma->anon_vma_chain);
			vma->vm_mm = current->mm;
			vma->vm_end = PAGE_SIZE;
			vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
			vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO |
					VM_DONTEXPAND | VM_DONTDUMP;
			down_write(&current->mm->mmap_sem);
			if (insert_vm_struct(current->mm, vma)) {
				up_write(&current->mm->mmap_sem);
				kmem_cache_free(vm_area_cachep, vma);
				return;
			}
			up_write(&current->mm->mmap_sem);
		}
	}
}

void __init
free_initmem (void)
{
	free_reserved_area(ia64_imva(__init_begin), ia64_imva(__init_end),
			   -1, "unused kernel");
}

void __init
free_initrd_mem (unsigned long start, unsigned long end)
{
	/*
	 * EFI uses 4KB pages while the kernel can use 4KB or bigger.
	 * Thus EFI and the kernel may have different page sizes. It is
	 * therefore possible to have the initrd share the same page as
	 * the end of the kernel (given current setup).
	 *
	 * To avoid freeing/using the wrong page (kernel sized) we:
	 *	- align up the beginning of initrd
	 *	- align down the end of initrd
	 *
	 *  |             |
	 *  |=============| a000
	 *  |             |
	 *  |             |
	 *  |             | 9000
	 *  |/////////////|
	 *  |/////////////|
	 *  |=============| 8000
	 *  |///INITRD////|
	 *  |/////////////|
	 *  |/////////////| 7000
	 *  |             |
	 *  |KKKKKKKKKKKKK|
	 *  |=============| 6000
	 *  |KKKKKKKKKKKKK|
	 *  |KKKKKKKKKKKKK|
	 *  K=kernel using 8KB pages
	 *
	 * In this example, we must free page 8000 ONLY. So we must align up
	 * initrd_start and keep initrd_end as is.
	 */
	start = PAGE_ALIGN(start);
	end = end & PAGE_MASK;

	if (start < end)
		printk(KERN_INFO "Freeing initrd memory: %ldkB freed\n", (end - start) >> 10);

	for (; start < end; start += PAGE_SIZE) {
		if (!virt_addr_valid(start))
			continue;
		free_reserved_page(virt_to_page(start));
	}
}

/*
 * This installs a clean page in the kernel's page table.
 */
static struct page * __init
put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset_k(address);		/* note: this is NOT pgd_offset()! */

	{
		pud = pud_alloc(&init_mm, pgd, address);
		if (!pud)
			goto out;
		pmd = pmd_alloc(&init_mm, pud, address);
		if (!pmd)
			goto out;
		pte = pte_alloc_kernel(pmd, address);
		if (!pte)
			goto out;
		if (!pte_none(*pte))
			goto out;
		set_pte(pte, mk_pte(page, pgprot));
	}
  out:
	/* no need for flush_tlb */
	return page;
}

static void __init
setup_gate (void)
{
	struct page *page;

	/*
	 * Map the gate page twice: once read-only to export the ELF
	 * headers etc. and once execute-only page to enable
	 * privilege-promotion via "epc":
	 */
	page = virt_to_page(ia64_imva(__start_gate_section));
	put_kernel_page(page, GATE_ADDR, PAGE_READONLY);
#ifdef HAVE_BUGGY_SEGREL
	page = virt_to_page(ia64_imva(__start_gate_section + PAGE_SIZE));
	put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE);
#else
	put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE);
	/* Fill in the holes (if any) with read-only zero pages: */
	{
		unsigned long addr;

		for (addr = GATE_ADDR + PAGE_SIZE;
		     addr < GATE_ADDR + PERCPU_PAGE_SIZE;
		     addr += PAGE_SIZE) {
			put_kernel_page(ZERO_PAGE(0), addr,
					PAGE_READONLY);
			put_kernel_page(ZERO_PAGE(0), addr + PERCPU_PAGE_SIZE,
					PAGE_READONLY);
		}
	}
#endif
	ia64_patch_gate();
}

static struct vm_area_struct gate_vma;

static int __init gate_vma_init(void)
{
	gate_vma.vm_mm = NULL;
	gate_vma.vm_start = FIXADDR_USER_START;
	gate_vma.vm_end = FIXADDR_USER_END;
	gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
	gate_vma.vm_page_prot = __P101;

	return 0;
}
__initcall(gate_vma_init);

struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
	return &gate_vma;
}

int in_gate_area_no_mm(unsigned long addr)
{
	if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END))
		return 1;

	return 0;
}

int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
	return in_gate_area_no_mm(addr);
}

void ia64_mmu_init(void *my_cpu_data)
{
	unsigned long pta, impl_va_bits;
	extern void tlb_init(void);

#ifdef CONFIG_DISABLE_VHPT
#	define VHPT_ENABLE_BIT	0
#else
#	define VHPT_ENABLE_BIT	1
#endif

	/*
	 * Check if the virtually mapped linear page table (VMLPT) overlaps with a mapped
	 * address space.  The IA-64 architecture guarantees that at least 50 bits of
	 * virtual address space are implemented but if we pick a large enough page size
	 * (e.g., 64KB), the mapped address space is big enough that it will overlap with
	 * VMLPT.  I assume that once we run on machines big enough to warrant 64KB pages,
	 * IMPL_VA_MSB will be significantly bigger, so this is unlikely to become a
	 * problem in practice.  Alternatively, we could truncate the top of the mapped
	 * address space to not permit mappings that would overlap with the VMLPT.
	 */
#	define pte_bits			3
#	define mapped_space_bits	(3*(PAGE_SHIFT - pte_bits) + PAGE_SHIFT)
	/*
	 * The virtual page table has to cover the entire implemented address space within
	 * a region even though not all of this space may be mappable.  The reason for
	 * this is that the Access bit and Dirty bit fault handlers perform
	 * non-speculative accesses to the virtual page table, so the address range of the
	 * virtual page table itself needs to be covered by virtual page table.
	 */
#	define vmlpt_bits		(impl_va_bits - PAGE_SHIFT + pte_bits)
#	define POW2(n)			(1ULL << (n))

	impl_va_bits = ffz(~(local_cpu_data->unimpl_va_mask | (7UL << 61)));

	if (impl_va_bits < 51 || impl_va_bits > 61)
		panic("CPU has bogus IMPL_VA_MSB value of %lu!\n", impl_va_bits - 1);

	/*
	 * mapped_space_bits - PAGE_SHIFT is the total number of ptes we need,
	 * which must fit into "vmlpt_bits - pte_bits" slots.  Second half of
	 * the test makes sure that our mapped space doesn't overlap the
	 * unimplemented hole in the middle of the region.
	 */
	if ((mapped_space_bits - PAGE_SHIFT > vmlpt_bits - pte_bits) ||
	    (mapped_space_bits > impl_va_bits - 1))
		panic("Cannot build a big enough virtual-linear page table"
		      " to cover mapped address space.\n"
		      " Try using a smaller page size.\n");

	/* place the VMLPT at the end of each page-table mapped region: */
	pta = POW2(61) - POW2(vmlpt_bits);

	/*
	 * Set the (virtually mapped linear) page table address.  Bit
	 * 8 selects between the short and long format, bits 2-7 the
	 * size of the table, and bit 0 whether the VHPT walker is
	 * enabled.
	 */
	ia64_set_pta(pta | (0 << 8) | (vmlpt_bits << 2) | VHPT_ENABLE_BIT);
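	/*
	 * Worked example (assuming 16KB pages, i.e. PAGE_SHIFT == 14, and
	 * impl_va_bits == 51): vmlpt_bits = 51 - 14 + 3 = 40, so the VMLPT
	 * occupies the top 2^40 bytes of each region and cr.pta is programmed
	 * with base 2^61 - 2^40, size field 40, short format, walker enabled.
	 */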

	ia64_tlb_init();

#ifdef	CONFIG_HUGETLB_PAGE
	ia64_set_rr(HPAGE_REGION_BASE, HPAGE_SHIFT << 2);
	ia64_srlz_d();
#endif
}

#ifdef CONFIG_VIRTUAL_MEM_MAP
int vmemmap_find_next_valid_pfn(int node, int i)
{
	unsigned long end_address, hole_next_pfn;
	unsigned long stop_address;
	pg_data_t *pgdat = NODE_DATA(node);

	end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
	end_address = PAGE_ALIGN(end_address);
	stop_address = (unsigned long) &vmem_map[pgdat_end_pfn(pgdat)];

	do {
		pgd_t *pgd;
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		pgd = pgd_offset_k(end_address);
		if (pgd_none(*pgd)) {
			end_address += PGDIR_SIZE;
			continue;
		}

		pud = pud_offset(pgd, end_address);
		if (pud_none(*pud)) {
			end_address += PUD_SIZE;
			continue;
		}

		pmd = pmd_offset(pud, end_address);
		if (pmd_none(*pmd)) {
			end_address += PMD_SIZE;
			continue;
		}

		pte = pte_offset_kernel(pmd, end_address);
retry_pte:
		if (pte_none(*pte)) {
			end_address += PAGE_SIZE;
			pte++;
			if ((end_address < stop_address) &&
			    (end_address != ALIGN(end_address, 1UL << PMD_SHIFT)))
				goto retry_pte;
			continue;
		}
		/* Found next valid vmem_map page */
		break;
	} while (end_address < stop_address);

	end_address = min(end_address, stop_address);
	end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1;
	hole_next_pfn = end_address / sizeof(struct page);
	return hole_next_pfn - pgdat->node_start_pfn;
}

int __init create_mem_map_page_table(u64 start, u64 end, void *arg)
{
	unsigned long address, start_page, end_page;
	struct page *map_start, *map_end;
	int node;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
	map_end   = vmem_map + (__pa(end) >> PAGE_SHIFT);

	start_page = (unsigned long) map_start & PAGE_MASK;
	end_page = PAGE_ALIGN((unsigned long) map_end);
	node = paddr_to_nid(__pa(start));

	for (address = start_page; address < end_page; address += PAGE_SIZE) {
		pgd = pgd_offset_k(address);
		if (pgd_none(*pgd))
			pgd_populate(&init_mm, pgd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
		pud = pud_offset(pgd, address);

		if (pud_none(*pud))
			pud_populate(&init_mm, pud, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
		pmd = pmd_offset(pud, address);

		if (pmd_none(*pmd))
			pmd_populate_kernel(&init_mm, pmd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
		pte = pte_offset_kernel(pmd, address);

		if (pte_none(*pte))
			set_pte(pte, pfn_pte(__pa(alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE)) >> PAGE_SHIFT,
					     PAGE_KERNEL));
	}
	return 0;
}

struct memmap_init_callback_data {
	struct page *start;
	struct page *end;
	int nid;
	unsigned long zone;
};

static int __meminit
virtual_memmap_init(u64 start, u64 end, void *arg)
{
	struct memmap_init_callback_data *args;
	struct page *map_start, *map_end;

	args = (struct memmap_init_callback_data *) arg;
	map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
	map_end   = vmem_map + (__pa(end) >> PAGE_SHIFT);

	if (map_start < args->start)
		map_start = args->start;
	if (map_end > args->end)
		map_end = args->end;

	/*
	 * We have to initialize "out of bounds" struct page elements that fit completely
	 * on the same pages that were allocated for the "in bounds" elements because they
	 * may be referenced later (and found to be "reserved").
	 */
	map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1)) / sizeof(struct page);
	map_end += ((PAGE_ALIGN((unsigned long) map_end) - (unsigned long) map_end)
		    / sizeof(struct page));

	if (map_start < map_end)
		memmap_init_zone((unsigned long)(map_end - map_start),
				 args->nid, args->zone, page_to_pfn(map_start),
				 MEMMAP_EARLY);
	return 0;
}

void __meminit
memmap_init (unsigned long size, int nid, unsigned long zone,
	     unsigned long start_pfn)
{
	if (!vmem_map)
		memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY);
	else {
		struct page *start;
		struct memmap_init_callback_data args;

		start = pfn_to_page(start_pfn);
		args.start = start;
		args.end = start + size;
		args.nid = nid;
		args.zone = zone;

		efi_memmap_walk(virtual_memmap_init, &args);
	}
}

int
ia64_pfn_valid (unsigned long pfn)
{
	char byte;
	struct page *pg = pfn_to_page(pfn);

	return     (__get_user(byte, (char __user *) pg) == 0)
		&& ((((u64)pg & PAGE_MASK) == (((u64)(pg + 1) - 1) & PAGE_MASK))
			|| (__get_user(byte, (char __user *) (pg + 1) - 1) == 0));
}
EXPORT_SYMBOL(ia64_pfn_valid);

int __init find_largest_hole(u64 start, u64 end, void *arg)
{
	u64 *max_gap = arg;

	static u64 last_end = PAGE_OFFSET;

	/* NOTE: this algorithm assumes efi memmap table is ordered */

	if (*max_gap < (start - last_end))
		*max_gap = start - last_end;
	last_end = end;
	return 0;
}

#endif /* CONFIG_VIRTUAL_MEM_MAP */

int __init register_active_ranges(u64 start, u64 len, int nid)
{
	u64 end = start + len;

#ifdef CONFIG_KEXEC
	if (start > crashk_res.start && start < crashk_res.end)
		start = crashk_res.end;
	if (end > crashk_res.start && end < crashk_res.end)
		end = crashk_res.start;
#endif

	if (start < end)
		memblock_add_node(__pa(start), end - start, nid);
	return 0;
}
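
/*
 * The clipping against crashk_res above keeps the region reserved for a
 * crash (kexec) capture kernel out of memblock, so it is never handed to
 * the page allocator by the primary kernel.
 */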

int
find_max_min_low_pfn (u64 start, u64 end, void *arg)
{
	unsigned long pfn_start, pfn_end;
#ifdef CONFIG_FLATMEM
	pfn_start = (PAGE_ALIGN(__pa(start))) >> PAGE_SHIFT;
	pfn_end = (PAGE_ALIGN(__pa(end - 1))) >> PAGE_SHIFT;
#else
	pfn_start = GRANULEROUNDDOWN(__pa(start)) >> PAGE_SHIFT;
	pfn_end = GRANULEROUNDUP(__pa(end - 1)) >> PAGE_SHIFT;
#endif
	min_low_pfn = min(min_low_pfn, pfn_start);
	max_low_pfn = max(max_low_pfn, pfn_end);
	return 0;
}

/*
 * Boot command-line option "nolwsys" can be used to disable the use of any light-weight
 * system call handler.  When this option is in effect, all fsyscalls will end up bubbling
 * down into the kernel and calling the normal (heavy-weight) syscall handler.  This is
 * useful for performance testing, but conceivably could also come in handy for debugging
 * purposes.
 */
static int nolwsys __initdata;

static int __init
nolwsys_setup (char *s)
{
	nolwsys = 1;
	return 1;
}

__setup("nolwsys", nolwsys_setup);

void __init
mem_init (void)
{
	int i;

	BUG_ON(PTRS_PER_PGD * sizeof(pgd_t) != PAGE_SIZE);
	BUG_ON(PTRS_PER_PMD * sizeof(pmd_t) != PAGE_SIZE);
	BUG_ON(PTRS_PER_PTE * sizeof(pte_t) != PAGE_SIZE);

	/*
	 * This needs to be called _after_ the command line has been parsed but _before_
	 * any drivers that may need the PCI DMA interface are initialized or bootmem has
	 * been freed.
	 */
	platform_dma_init();

#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif

	set_max_mapnr(max_low_pfn);
	high_memory = __va(max_low_pfn * PAGE_SIZE);
	free_all_bootmem();
	mem_init_print_info(NULL);

	/*
	 * For fsyscall entry points with no light-weight handler, use the ordinary
	 * (heavy-weight) handler, but mark it by setting bit 0, so the fsyscall entry
	 * code can tell them apart.
	 */
	for (i = 0; i < NR_syscalls; ++i) {
		extern unsigned long fsyscall_table[NR_syscalls];
		extern unsigned long sys_call_table[NR_syscalls];

		if (!fsyscall_table[i] || nolwsys)
			fsyscall_table[i] = sys_call_table[i] | 1;
	}
	setup_gate();
}

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	ret = __add_pages(nid, start_pfn, nr_pages, want_memblock);
	if (ret)
		printk("%s: Problem encountered in __add_pages() as ret=%d\n",
		       __func__, ret);

	return ret;
}

#ifdef CONFIG_MEMORY_HOTREMOVE
int arch_remove_memory(u64 start, u64 size)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct zone *zone;
	int ret;

	zone = page_zone(pfn_to_page(start_pfn));
	ret = __remove_pages(zone, start_pfn, nr_pages);
	if (ret)
		pr_warn("%s: Problem encountered in __remove_pages() as"
			" ret=%d\n", __func__, ret);

	return ret;
}
#endif
#endif