arch/arm/mm/idmap.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm_types.h>
#include <linux/pgtable.h>

#include <asm/cputype.h>
#include <asm/idmap.h>
#include <asm/hwcap.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/system_info.h>
/*
 * Note: accesses outside of the kernel image and the identity map area
 * are not supported on any CPU using the idmap tables as its current
 * page tables.
 */
pgd_t *idmap_pgd __ro_after_init;
long long arch_phys_to_idmap_offset __ro_after_init;
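
/*
 * arch_phys_to_idmap_offset lets virt_to_idmap() bias the physical
 * address used for the 1:1 map on platforms whose idmap'd alias differs
 * from the true physical address (Keystone 2 is the usual example; see
 * phys_to_idmap() in <asm/memory.h>).
 */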
#ifdef CONFIG_ARM_LPAE
static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
	unsigned long prot)
{
	pmd_t *pmd;
	unsigned long next;

	if (pud_none_or_clear_bad(pud) || (pud_val(*pud) & L_PGD_SWAPPER)) {
		pmd = pmd_alloc_one(&init_mm, addr);
		if (!pmd) {
			pr_warn("Failed to allocate identity pmd.\n");
			return;
		}
		/*
		 * Copy the original PMD to ensure that the PMD entries for
		 * the kernel image are preserved.
		 */
		if (!pud_none(*pud))
			memcpy(pmd, pmd_offset(pud, 0),
			       PTRS_PER_PMD * sizeof(pmd_t));
		pud_populate(&init_mm, pud, pmd);
		pmd += pmd_index(addr);
	} else
		pmd = pmd_offset(pud, addr);

	do {
		next = pmd_addr_end(addr, end);
		*pmd = __pmd((addr & PMD_MASK) | prot);
		flush_pmd_entry(pmd);
	} while (pmd++, addr = next, addr != end);
}
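/*
 * Each pmd entry written by the loop above is a section (block) mapping,
 * i.e. 2MiB per entry under LPAE, so the identity map needs no pte level.
 */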
#else	/* !CONFIG_ARM_LPAE */
static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
	unsigned long prot)
{
	pmd_t *pmd = pmd_offset(pud, addr);

	addr = (addr & PMD_MASK) | prot;
	pmd[0] = __pmd(addr);
	addr += SECTION_SIZE;
	pmd[1] = __pmd(addr);
	flush_pmd_entry(pmd);
}
#endif	/* CONFIG_ARM_LPAE */
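
/*
 * In the classic (non-LPAE) variant above, a Linux pmd covers two 1MiB
 * hardware sections, hence the pmd[0]/pmd[1] pair written per
 * PMD_MASK-aligned 2MiB block.
 */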
static void idmap_add_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
	unsigned long prot)
{
	p4d_t *p4d = p4d_offset(pgd, addr);
	pud_t *pud = pud_offset(p4d, addr);
	unsigned long next;

	do {
		next = pud_addr_end(addr, end);
		idmap_add_pmd(pud, addr, next, prot);
	} while (pud++, addr = next, addr != end);
}
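/*
 * On 32-bit ARM the p4d and pud levels are folded, so p4d_offset() and
 * pud_offset() above are effectively no-ops and the loop runs once per
 * pgd entry.
 */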

static void identity_mapping_add(pgd_t *pgd, const char *text_start,
				 const char *text_end, unsigned long prot)
{
	unsigned long addr, end;
	unsigned long next;

	addr = virt_to_idmap(text_start);
	end = virt_to_idmap(text_end);
	pr_info("Setting up static identity map for 0x%lx - 0x%lx\n", addr, end);

	prot |= PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AF;

	if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale_family())
		prot |= PMD_BIT4;

	pgd += pgd_index(addr);
	do {
		next = pgd_addr_end(addr, end);
		idmap_add_pud(pgd, addr, next, prot);
	} while (pgd++, addr = next, addr != end);
}
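/*
 * The flags above select a writable kernel section mapping; PMD_BIT4, as
 * I understand it, covers the "bit 4 must be set" rule for first-level
 * section descriptors on ARMv5 and earlier cores, which the XScale
 * family does not have.
 */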

extern char __idmap_text_start[], __idmap_text_end[];

static int __init init_static_idmap(void)
{
	idmap_pgd = pgd_alloc(&init_mm);
	if (!idmap_pgd)
		return -ENOMEM;

	identity_mapping_add(idmap_pgd, __idmap_text_start,
			     __idmap_text_end, 0);

	/* Flush L1 for the hardware to see this page table content */
	if (!(elf_hwcap & HWCAP_LPAE))
		flush_cache_louis();

	return 0;
}
early_initcall(init_static_idmap);
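
/*
 * __idmap_text_start/__idmap_text_end bound the .idmap.text linker
 * section; code that must survive the MMU being switched off or on
 * (e.g. the cpu_reset and secondary-boot trampolines) is placed there
 * so that the static map built above covers it.
 */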

/*
 * In order to soft-boot, we need to switch to a 1:1 mapping for the
 * cpu_reset functions. This will then ensure that we have predictable
 * results when turning off the mmu.
 */
void setup_mm_for_reboot(void)
{
	/* Switch to the identity mapping. */
	cpu_switch_mm(idmap_pgd, &init_mm);
	local_flush_bp_all();

#ifdef CONFIG_CPU_HAS_ASID
	/*
	 * We don't have a clean ASID for the identity mapping, which
	 * may clash with virtual addresses of the previous page tables
	 * and therefore may still be live in the TLB.
	 */
	local_flush_tlb_all();
#endif
}
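
/*
 * Typical call sequence (a sketch; the exact call site lives in the
 * ARM soft-reboot path, e.g. soft_restart()):
 *
 *	setup_mm_for_reboot();
 *	...
 *	cpu_reset(addr);	// executes via the 1:1 mapping
 */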