/*
 * Based on arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/io.h>

#include <asm/cputype.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>

#include "mm.h"

/*
 * Empty_zero_page is a special page that is used for zero-initialized data
 * and COW.
 */
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);

pgprot_t pgprot_default;
EXPORT_SYMBOL(pgprot_default);

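/*
 * Section attributes for the kernel linear mapping; set up by
 * init_mem_pgprot() and used when building section (PMD) mappings.
 */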
static pmdval_t prot_sect_kernel;

struct cachepolicy {
	const char	policy[16];
	u64		mair;
	u64		tcr;
};

static struct cachepolicy cache_policies[] __initdata = {
	{
		.policy	= "uncached",
		.mair	= 0x44,			/* inner, outer non-cacheable */
		.tcr	= TCR_IRGN_NC | TCR_ORGN_NC,
	}, {
		.policy	= "writethrough",
		.mair	= 0xaa,			/* inner, outer write-through, read-allocate */
		.tcr	= TCR_IRGN_WT | TCR_ORGN_WT,
	}, {
		.policy	= "writeback",
		.mair	= 0xee,			/* inner, outer write-back, read-allocate */
		.tcr	= TCR_IRGN_WBnWA | TCR_ORGN_WBnWA,
	}
};

/*
 * These are useful for identifying cache coherency problems by allowing the
 * cache or the cache and writebuffer to be turned off. It changes the Normal
 * memory caching attributes in the MAIR_EL1 register.
 */
static int __init early_cachepolicy(char *p)
{
	int i;
	u64 tmp;

	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
		int len = strlen(cache_policies[i].policy);

		if (memcmp(p, cache_policies[i].policy, len) == 0)
			break;
	}
	if (i == ARRAY_SIZE(cache_policies)) {
		pr_err("ERROR: unknown or unsupported cache policy: %s\n", p);
		return 0;
	}

	flush_cache_all();

	/*
	 * Modify MT_NORMAL attributes in MAIR_EL1.
	 */
	asm volatile(
	"	mrs	%0, mair_el1\n"
	"	bfi	%0, %1, #%2, #8\n"
	"	msr	mair_el1, %0\n"
	"	isb\n"
	: "=&r" (tmp)
	: "r" (cache_policies[i].mair), "i" (MT_NORMAL * 8));

	/*
	 * Modify TCR PTW cacheability attributes.
	 */
	asm volatile(
	"	mrs	%0, tcr_el1\n"
	"	bic	%0, %0, %2\n"
	"	orr	%0, %0, %1\n"
	"	msr	tcr_el1, %0\n"
	"	isb\n"
	: "=&r" (tmp)
	: "r" (cache_policies[i].tcr), "r" (TCR_IRGN_MASK | TCR_ORGN_MASK));

	flush_cache_all();

	return 0;
}
early_param("cachepolicy", early_cachepolicy);

/*
 * Adjust the PMD section entries according to the CPU in use.
 */
static void __init init_mem_pgprot(void)
{
	pteval_t default_pgprot;
	int i;

	default_pgprot = PTE_ATTRINDX(MT_NORMAL);
	prot_sect_kernel = PMD_TYPE_SECT | PMD_SECT_AF | PMD_ATTRINDX(MT_NORMAL);

#ifdef CONFIG_SMP
	/*
	 * Mark memory with the "shared" attribute for SMP systems
	 */
	default_pgprot |= PTE_SHARED;
	prot_sect_kernel |= PMD_SECT_S;
#endif

	for (i = 0; i < 16; i++) {
		unsigned long v = pgprot_val(protection_map[i]);
		protection_map[i] = __pgprot(v | default_pgprot);
	}

	pgprot_default = __pgprot(PTE_TYPE_PAGE | PTE_AF | default_pgprot);
}

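/*
 * Used for mmap() of physical memory (e.g. /dev/mem): pfns outside RAM are
 * mapped non-cached, O_SYNC mappings get a write-combining protection, and
 * everything else keeps the vma protection passed in.
 */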
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

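/* Boot-time allocator: returns naturally aligned, zeroed memory from memblock. */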
static void __init *early_alloc(unsigned long sz)
{
	void *ptr = __va(memblock_alloc(sz, sz));
	memset(ptr, 0, sz);
	return ptr;
}

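/*
 * Fill in the pte level for [addr, end): allocate a pte table if the pmd is
 * empty, then install PAGE_KERNEL_EXEC entries for the consecutive pfns.
 */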
static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
				  unsigned long end, unsigned long pfn)
{
	pte_t *pte;

	if (pmd_none(*pmd)) {
		pte = early_alloc(PTRS_PER_PTE * sizeof(pte_t));
		__pmd_populate(pmd, __pa(pte), PMD_TYPE_TABLE);
	}
	BUG_ON(pmd_bad(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	do {
		set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
				  unsigned long end, phys_addr_t phys)
{
	pmd_t *pmd;
	unsigned long next;

	/*
	 * Check for initial section mappings in the pgd/pud and remove them.
	 */
	if (pud_none(*pud) || pud_bad(*pud)) {
		pmd = early_alloc(PTRS_PER_PMD * sizeof(pmd_t));
		pud_populate(&init_mm, pud, pmd);
	}

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		/* try section mapping first */
		if (((addr | next | phys) & ~SECTION_MASK) == 0)
			set_pmd(pmd, __pmd(phys | prot_sect_kernel));
		else
			alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys));
		phys += next - addr;
	} while (pmd++, addr = next, addr != end);
}

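/*
 * Walk the pud entries covering [addr, end), handing each range down to
 * alloc_init_pmd() and advancing the physical address accordingly.
 */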
static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
				  unsigned long end, unsigned long phys)
{
	pud_t *pud = pud_offset(pgd, addr);
	unsigned long next;

	do {
		next = pud_addr_end(addr, end);
		alloc_init_pmd(pud, addr, next, phys);
		phys += next - addr;
	} while (pud++, addr = next, addr != end);
}

/*
 * Create the page directory entries and any necessary page tables for the
 * mapping specified by 'phys', 'virt' and 'size'.
 */
static void __init create_mapping(phys_addr_t phys, unsigned long virt,
				  phys_addr_t size)
{
	unsigned long addr, length, end, next;
	pgd_t *pgd;

	if (virt < VMALLOC_START) {
		pr_warning("BUG: not creating mapping for 0x%016llx at 0x%016lx - outside kernel range\n",
			   phys, virt);
		return;
	}

	addr = virt & PAGE_MASK;
	length = PAGE_ALIGN(size + (virt & ~PAGE_MASK));

	pgd = pgd_offset_k(addr);
	end = addr + length;
	do {
		next = pgd_addr_end(addr, end);
		alloc_init_pud(pgd, addr, next, phys);
		phys += next - addr;
	} while (pgd++, addr = next, addr != end);
}

#ifdef CONFIG_EARLY_PRINTK
/*
 * Create an early I/O mapping using the pgd/pmd entries already populated
 * in head.S as this function is called too early to allocate any memory. The
 * mapping size is 2MB with 4KB pages or 64KB with 64KB pages.
 */
void __iomem * __init early_io_map(phys_addr_t phys, unsigned long virt)
{
	unsigned long size, mask;
	bool page64k = IS_ENABLED(CONFIG_ARM64_64K_PAGES);
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	/*
	 * No early pte entries with !ARM64_64K_PAGES configuration, so using
	 * sections (pmd).
	 */
	size = page64k ? PAGE_SIZE : SECTION_SIZE;
	mask = ~(size - 1);

	pgd = pgd_offset_k(virt);
	pud = pud_offset(pgd, virt);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, virt);

	if (page64k) {
		if (pmd_none(*pmd))
			return NULL;
		pte = pte_offset_kernel(pmd, virt);
		set_pte(pte, __pte((phys & mask) | PROT_DEVICE_nGnRE));
	} else {
		set_pmd(pmd, __pmd((phys & mask) | PROT_SECT_DEVICE_nGnRE));
	}

	return (void __iomem *)((virt & mask) + (phys & ~mask));
}
#endif

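/* Create the linear mapping for all memblock memory regions. */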
static void __init map_mem(void)
{
	struct memblock_region *reg;

	/* map all the memory banks */
	for_each_memblock(memory, reg) {
		phys_addr_t start = reg->base;
		phys_addr_t end = start + reg->size;

		if (start >= end)
			break;

		create_mapping(start, __phys_to_virt(start), end - start);
	}
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps and sets up the zero page.
 */
void __init paging_init(void)
{
	void *zero_page;

	/*
	 * Maximum PGDIR_SIZE addressable via the initial direct kernel
	 * mapping in swapper_pg_dir.
	 */
	memblock_set_current_limit((PHYS_OFFSET & PGDIR_MASK) + PGDIR_SIZE);

	init_mem_pgprot();
	map_mem();

	/*
	 * Finally flush the caches and tlb to ensure that we're in a
	 * consistent state.
	 */
	flush_cache_all();
	flush_tlb_all();

	/* allocate the zero page. */
	zero_page = early_alloc(PAGE_SIZE);

	bootmem_init();

	empty_zero_page = virt_to_page(zero_page);
	__flush_dcache_page(empty_zero_page);

	/*
	 * TTBR0 is only used for the identity mapping at this stage. Make it
	 * point to zero page to avoid speculatively fetching new entries.
	 */
	cpu_set_reserved_ttbr0();
	flush_tlb_all();
}

/*
 * Enable the identity mapping to allow the MMU disabling.
 */
void setup_mm_for_reboot(void)
{
	cpu_switch_mm(idmap_pg_dir, &init_mm);
	flush_tlb_all();
}

/*
 * Check whether a kernel address is valid (derived from arch/x86/).
 */
int kern_addr_valid(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if ((((long)addr) >> VA_BITS) != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;

	return pfn_valid(pte_pfn(*pte));
}
#ifdef CONFIG_SPARSEMEM_VMEMMAP
#ifdef CONFIG_ARM64_64K_PAGES
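/* With 64KB pages the vmemmap is backed by base pages only. */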
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	return vmemmap_populate_basepages(start, end, node);
}
#else	/* !CONFIG_ARM64_64K_PAGES */
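/*
 * Without 64KB pages, back the vmemmap with section (PMD_SIZE) mappings,
 * verifying any ranges that are already populated.
 */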
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long addr = start;
	unsigned long next;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	do {
		next = pmd_addr_end(addr, end);

		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;

		pud = vmemmap_pud_populate(pgd, addr, node);
		if (!pud)
			return -ENOMEM;

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			void *p = NULL;

			p = vmemmap_alloc_block_buf(PMD_SIZE, node);
			if (!p)
				return -ENOMEM;

			set_pmd(pmd, __pmd(__pa(p) | prot_sect_kernel));
		} else
			vmemmap_verify((pte_t *)pmd, node, addr, next);
	} while (addr = next, addr != end);

	return 0;
}
#endif	/* CONFIG_ARM64_64K_PAGES */
void vmemmap_free(unsigned long start, unsigned long end)
{
}
#endif	/* CONFIG_SPARSEMEM_VMEMMAP */