/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_PGTABLE_H
#define __ASM_PGTABLE_H

#include <asm/bug.h>
#include <asm/proc-fns.h>

#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable-prot.h>
#include <asm/tlbflush.h>

/*
 * VMALLOC range.
 *
 * VMALLOC_START: beginning of the kernel vmalloc space
 * VMALLOC_END: extends to the available space below vmemmap, PCI I/O space
 *	and fixed mappings
 */
#define VMALLOC_START		(MODULES_END)
#define VMALLOC_END		(PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K)
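
/*
 * vmemmap is biased by the pfn of the start of RAM (memstart_addr >>
 * PAGE_SHIFT) so that it can be indexed directly by absolute pfn: the
 * struct page entries for the first RAM page land at VMEMMAP_START.
 */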
#define vmemmap			((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))

#define FIRST_USER_ADDRESS	0UL

#ifndef __ASSEMBLY__

#include <asm/cmpxchg.h>
#include <asm/fixmap.h>
#include <linux/mmdebug.h>
#include <linux/mm_types.h>
#include <linux/sched.h>

extern void __pte_error(const char *file, int line, unsigned long val);
extern void __pmd_error(const char *file, int line, unsigned long val);
extern void __pud_error(const char *file, int line, unsigned long val);
extern void __pgd_error(const char *file, int line, unsigned long val);

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr)	phys_to_page(__pa_symbol(empty_zero_page))

#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte_val(pte))

/*
 * Macros to convert between a physical address and its placement in a
 * page table entry, taking care of 52-bit addresses.
 */
#ifdef CONFIG_ARM64_PA_BITS_52
#define __pte_to_phys(pte)	\
	((pte_val(pte) & PTE_ADDR_LOW) | ((pte_val(pte) & PTE_ADDR_HIGH) << 36))
#define __phys_to_pte_val(phys)	(((phys) | ((phys) >> 36)) & PTE_ADDR_MASK)
#else
#define __pte_to_phys(pte)	(pte_val(pte) & PTE_ADDR_MASK)
#define __phys_to_pte_val(phys)	(phys)
#endif
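
/*
 * With 52-bit physical addressing, PA bits [51:48] are carried in PTE bits
 * [15:12] (PTE_ADDR_HIGH); the shifts by 36 above move those four bits
 * between the two fields.
 */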

#define pte_pfn(pte)		(__pte_to_phys(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot)	\
	__pte(__phys_to_pte_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))

#define pte_none(pte)		(!pte_val(pte))
#define pte_clear(mm,addr,ptep)	set_pte(ptep, __pte(0))
#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))

/*
 * The following only work if pte_present(). Undefined behaviour otherwise.
 */
#define pte_present(pte)	(!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)))
#define pte_young(pte)		(!!(pte_val(pte) & PTE_AF))
#define pte_special(pte)	(!!(pte_val(pte) & PTE_SPECIAL))
#define pte_write(pte)		(!!(pte_val(pte) & PTE_WRITE))
#define pte_user_exec(pte)	(!(pte_val(pte) & PTE_UXN))
#define pte_cont(pte)		(!!(pte_val(pte) & PTE_CONT))
#define pte_devmap(pte)		(!!(pte_val(pte) & PTE_DEVMAP))

#define pte_cont_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + CONT_PTE_SIZE) & CONT_PTE_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);			\
})

#define pmd_cont_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + CONT_PMD_SIZE) & CONT_PMD_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);			\
})
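
/*
 * The "(__boundary - 1 < (end) - 1)" form above keeps the comparison
 * correct even when the next contiguous boundary wraps around to 0 at
 * the top of the address space.
 */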

#define pte_hw_dirty(pte)	(pte_write(pte) && !(pte_val(pte) & PTE_RDONLY))
#define pte_sw_dirty(pte)	(!!(pte_val(pte) & PTE_DIRTY))
#define pte_dirty(pte)		(pte_sw_dirty(pte) || pte_hw_dirty(pte))

#define pte_valid(pte)		(!!(pte_val(pte) & PTE_VALID))
/*
 * Execute-only user mappings do not have the PTE_USER bit set. All valid
 * kernel mappings have the PTE_UXN bit set.
 */
#define pte_valid_not_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN))
#define pte_valid_young(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))
#define pte_valid_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))

/*
 * Could the pte be present in the TLB? We must check mm_tlb_flush_pending
 * so that we don't erroneously return false for pages that have been
 * remapped as PROT_NONE but are yet to be flushed from the TLB.
 */
#define pte_accessible(mm, pte)	\
	(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid_young(pte))

/*
 * p??_access_permitted() is true for valid user mappings (subject to the
 * write permission check) other than user execute-only which do not have the
 * PTE_USER bit set. PROT_NONE mappings do not have the PTE_VALID bit set.
 */
#define pte_access_permitted(pte, write) \
	(pte_valid_user(pte) && (!(write) || pte_write(pte)))
#define pmd_access_permitted(pmd, write) \
	(pte_access_permitted(pmd_pte(pmd), (write)))
#define pud_access_permitted(pud, write) \
	(pte_access_permitted(pud_pte(pud), (write)))

static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) &= ~pgprot_val(prot);
	return pte;
}

static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) |= pgprot_val(prot);
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte = clear_pte_bit(pte, __pgprot(PTE_WRITE));
	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_WRITE));
	pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte = clear_pte_bit(pte, __pgprot(PTE_DIRTY));
	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));

	return pte;
}
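
/*
 * For a writable pte, clearing PTE_RDONLY below also makes it hardware-dirty
 * (writable && !PTE_RDONLY), matching the DBM encoding in the table further
 * down.
 */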
static inline pte_t pte_mkdirty(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));

	if (pte_write(pte))
		pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));

	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_SPECIAL));
}

static inline pte_t pte_mkcont(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_CONT));
	return set_pte_bit(pte, __pgprot(PTE_TYPE_PAGE));
}

static inline pte_t pte_mknoncont(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_CONT));
}

static inline pte_t pte_mkpresent(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_VALID));
}

static inline pmd_t pmd_mkcont(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) | PMD_SECT_CONT);
}

static inline pte_t pte_mkdevmap(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_DEVMAP | PTE_SPECIAL));
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	WRITE_ONCE(*ptep, pte);

	/*
	 * Only if the new pte is valid and kernel, otherwise TLB maintenance
	 * or update_mmu_cache() have the necessary barriers.
	 */
	if (pte_valid_not_user(pte))
		dsb(ishst);
}

extern void __sync_icache_dcache(pte_t pteval);

/*
 * PTE bits configuration in the presence of hardware Dirty Bit Management
 * (PTE_WRITE == PTE_DBM):
 *
 *   Dirty  Writable | PTE_RDONLY  PTE_WRITE  PTE_DIRTY (sw)
 *   0      0        | 1           0          0
 *   0      1        | 1           1          0
 *   1      0        | 1           0          1
 *   1      1        | 0           1          x
 *
 * When hardware DBM is not present, the software PTE_DIRTY bit is updated via
 * the page fault mechanism. Checking the dirty status of a pte becomes:
 *
 *   PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY)
 */
static inline void __check_racy_pte_update(struct mm_struct *mm, pte_t *ptep,
					   pte_t pte)
{
	pte_t old_pte;

	if (!IS_ENABLED(CONFIG_DEBUG_VM))
		return;

	old_pte = READ_ONCE(*ptep);

	if (!pte_valid(old_pte) || !pte_valid(pte))
		return;
	if (mm != current->active_mm && atomic_read(&mm->mm_users) <= 1)
		return;

	/*
	 * Check for potential race with hardware updates of the pte
	 * (ptep_set_access_flags safely changes valid ptes without going
	 * through an invalid entry).
	 */
	VM_WARN_ONCE(!pte_young(pte),
		     "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
		     __func__, pte_val(old_pte), pte_val(pte));
	VM_WARN_ONCE(pte_write(old_pte) && !pte_dirty(pte),
		     "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
		     __func__, pte_val(old_pte), pte_val(pte));
}
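
/*
 * For executable user mappings, make sure the I-cache and D-cache are
 * coherent before the new translation becomes visible.
 */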
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte))
		__sync_icache_dcache(pte);

	__check_racy_pte_update(mm, ptep, pte);

	set_pte(ptep, pte);
}
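
/*
 * PTE_RDONLY is ignored when comparing present ptes: hardware DBM can
 * clear it behind our back when marking a writable pte dirty.
 */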
#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	pteval_t lhs, rhs;

	lhs = pte_val(pte_a);
	rhs = pte_val(pte_b);

	if (pte_present(pte_a))
		lhs &= ~PTE_RDONLY;

	if (pte_present(pte_b))
		rhs &= ~PTE_RDONLY;

	return (lhs == rhs);
}

/*
 * Huge pte definitions.
 */
#define pte_mkhuge(pte)		(__pte(pte_val(pte) & ~PTE_TABLE_BIT))

/*
 * Hugetlb definitions.
 */
#define HUGE_MAX_HSTATE		4
#define HPAGE_SHIFT		PMD_SHIFT
#define HPAGE_SIZE		(_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)

static inline pte_t pgd_pte(pgd_t pgd)
{
	return __pte(pgd_val(pgd));
}

static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}

static inline pud_t pte_pud(pte_t pte)
{
	return __pud(pte_val(pte));
}

static inline pmd_t pud_pmd(pud_t pud)
{
	return __pmd(pud_val(pud));
}

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}

static inline pgprot_t mk_pud_sect_prot(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~PUD_TABLE_BIT) | PUD_TYPE_SECT);
}

static inline pgprot_t mk_pmd_sect_prot(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~PMD_TABLE_BIT) | PMD_TYPE_SECT);
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * See the comment in include/asm-generic/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)) == PTE_PROT_NONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif

/*
 * THP definitions.
 */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_trans_huge(pmd)	(pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_present(pmd)	pte_present(pmd_pte(pmd))
#define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
#define pmd_valid(pmd)		pte_valid(pmd_pte(pmd))
#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))
#define pmd_mkclean(pmd)	pte_pmd(pte_mkclean(pmd_pte(pmd)))
#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
#define pmd_mknotpresent(pmd)	(__pmd(pmd_val(pmd) & ~PMD_SECT_VALID))

#define pmd_thp_or_huge(pmd)	(pmd_huge(pmd) || pmd_trans_huge(pmd))

#define pmd_write(pmd)		pte_write(pmd_pte(pmd))

#define pmd_mkhuge(pmd)		(__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_devmap(pmd)		pte_devmap(pmd_pte(pmd))
#endif
static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
	return pte_pmd(set_pte_bit(pmd_pte(pmd), __pgprot(PTE_DEVMAP)));
}

#define __pmd_to_phys(pmd)	__pte_to_phys(pmd_pte(pmd))
#define __phys_to_pmd_val(phys)	__phys_to_pte_val(phys)
#define pmd_pfn(pmd)		((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT)
#define pfn_pmd(pfn,prot)	__pmd(__phys_to_pmd_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)

#define pud_young(pud)		pte_young(pud_pte(pud))
#define pud_mkyoung(pud)	pte_pud(pte_mkyoung(pud_pte(pud)))
#define pud_write(pud)		pte_write(pud_pte(pud))

#define pud_mkhuge(pud)		(__pud(pud_val(pud) & ~PUD_TABLE_BIT))

#define __pud_to_phys(pud)	__pte_to_phys(pud_pte(pud))
#define __phys_to_pud_val(phys)	__phys_to_pte_val(phys)
#define pud_pfn(pud)		((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT)
#define pfn_pud(pfn,prot)	__pud(__phys_to_pud_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))

#define set_pmd_at(mm, addr, pmdp, pmd)	set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd))

#define __pgd_to_phys(pgd)	__pte_to_phys(pgd_pte(pgd))
#define __phys_to_pgd_val(phys)	__phys_to_pte_val(phys)

#define __pgprot_modify(prot,mask,bits) \
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

/*
 * Mark the prot value as uncacheable and unbufferable.
 */
#define pgprot_noncached(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
#define pgprot_device(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);

#define pmd_none(pmd)		(!pmd_val(pmd))

#define pmd_bad(pmd)		(!(pmd_val(pmd) & PMD_TABLE_BIT))

#define pmd_table(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_TABLE)
#define pmd_sect(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_SECT)

#if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
static inline bool pud_sect(pud_t pud) { return false; }
static inline bool pud_table(pud_t pud) { return true; }
#else
#define pud_sect(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_SECT)
#define pud_table(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_TABLE)
#endif

extern pgd_t init_pg_dir[PTRS_PER_PGD];
extern pgd_t init_pg_end[];
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
extern pgd_t tramp_pg_dir[PTRS_PER_PGD];

extern void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd);

static inline bool in_swapper_pgdir(void *addr)
{
	return ((unsigned long)addr & PAGE_MASK) ==
	       ((unsigned long)swapper_pg_dir & PAGE_MASK);
}
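
/*
 * Entries that live in swapper_pg_dir (e.g. folded levels, see below) must
 * be updated via set_swapper_pgd(), which writes through a fixmap mapping
 * since swapper_pg_dir may be mapped read-only.
 */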
static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
#ifdef __PAGETABLE_PMD_FOLDED
	if (in_swapper_pgdir(pmdp)) {
		set_swapper_pgd((pgd_t *)pmdp, __pgd(pmd_val(pmd)));
		return;
	}
#endif /* __PAGETABLE_PMD_FOLDED */

	WRITE_ONCE(*pmdp, pmd);

	if (pmd_valid(pmd))
		dsb(ishst);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
{
	return __pmd_to_phys(pmd);
}

static inline void pte_unmap(pte_t *pte) { }

/* Find an entry in the third-level page table. */
#define pte_index(addr)		(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

#define pte_offset_phys(dir,addr)	(pmd_page_paddr(READ_ONCE(*(dir))) + pte_index(addr) * sizeof(pte_t))
#define pte_offset_kernel(dir,addr)	((pte_t *)__va(pte_offset_phys((dir), (addr))))

#define pte_offset_map(dir,addr)	pte_offset_kernel((dir), (addr))

#define pte_set_fixmap(addr)		((pte_t *)set_fixmap_offset(FIX_PTE, addr))
#define pte_set_fixmap_offset(pmd, addr)	pte_set_fixmap(pte_offset_phys(pmd, addr))
#define pte_clear_fixmap()		clear_fixmap(FIX_PTE)
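
/*
 * The *_set_fixmap() helpers (here and at the higher levels below) map a
 * table by physical address through a dedicated fixmap slot; they are used
 * by the early page-table creation code, which cannot rely on the linear
 * mapping being available.
 */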

#define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(__pmd_to_phys(pmd)))

/* use ONLY for statically allocated translation tables */
#define pte_offset_kimg(dir,addr)	((pte_t *)__phys_to_kimg(pte_offset_phys((dir), (addr))))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page),prot)

#if CONFIG_PGTABLE_LEVELS > 2

#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd_val(pmd))

#define pud_none(pud)		(!pud_val(pud))
#define pud_bad(pud)		(!(pud_val(pud) & PUD_TABLE_BIT))
#define pud_present(pud)	pte_present(pud_pte(pud))
#define pud_valid(pud)		pte_valid(pud_pte(pud))

static inline void set_pud(pud_t *pudp, pud_t pud)
{
#ifdef __PAGETABLE_PUD_FOLDED
	if (in_swapper_pgdir(pudp)) {
		set_swapper_pgd((pgd_t *)pudp, __pgd(pud_val(pud)));
		return;
	}
#endif /* __PAGETABLE_PUD_FOLDED */

	WRITE_ONCE(*pudp, pud);

	if (pud_valid(pud))
		dsb(ishst);
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

static inline phys_addr_t pud_page_paddr(pud_t pud)
{
	return __pud_to_phys(pud);
}

/* Find an entry in the second-level page table. */
#define pmd_index(addr)		(((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))

#define pmd_offset_phys(dir, addr)	(pud_page_paddr(READ_ONCE(*(dir))) + pmd_index(addr) * sizeof(pmd_t))
#define pmd_offset(dir, addr)		((pmd_t *)__va(pmd_offset_phys((dir), (addr))))

#define pmd_set_fixmap(addr)		((pmd_t *)set_fixmap_offset(FIX_PMD, addr))
#define pmd_set_fixmap_offset(pud, addr)	pmd_set_fixmap(pmd_offset_phys(pud, addr))
#define pmd_clear_fixmap()		clear_fixmap(FIX_PMD)

#define pud_page(pud)		pfn_to_page(__phys_to_pfn(__pud_to_phys(pud)))

/* use ONLY for statically allocated translation tables */
#define pmd_offset_kimg(dir,addr)	((pmd_t *)__phys_to_kimg(pmd_offset_phys((dir), (addr))))

#else

#define pud_page_paddr(pud)	({ BUILD_BUG(); 0; })

/* Match pmd_offset folding in <asm-generic/pgtable-nopmd.h> */
#define pmd_set_fixmap(addr)		NULL
#define pmd_set_fixmap_offset(pudp, addr)	((pmd_t *)pudp)
#define pmd_clear_fixmap()

#define pmd_offset_kimg(dir,addr)	((pmd_t *)dir)

#endif /* CONFIG_PGTABLE_LEVELS > 2 */

#if CONFIG_PGTABLE_LEVELS > 3

#define pud_ERROR(pud)		__pud_error(__FILE__, __LINE__, pud_val(pud))

#define pgd_none(pgd)		(!pgd_val(pgd))
#define pgd_bad(pgd)		(!(pgd_val(pgd) & 2))
#define pgd_present(pgd)	(pgd_val(pgd))

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	if (in_swapper_pgdir(pgdp)) {
		set_swapper_pgd(pgdp, pgd);
		return;
	}

	WRITE_ONCE(*pgdp, pgd);
	dsb(ishst);
}

static inline void pgd_clear(pgd_t *pgdp)
{
	set_pgd(pgdp, __pgd(0));
}

static inline phys_addr_t pgd_page_paddr(pgd_t pgd)
{
	return __pgd_to_phys(pgd);
}

/* Find an entry in the first-level page table. */
#define pud_index(addr)		(((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))

#define pud_offset_phys(dir, addr)	(pgd_page_paddr(READ_ONCE(*(dir))) + pud_index(addr) * sizeof(pud_t))
#define pud_offset(dir, addr)		((pud_t *)__va(pud_offset_phys((dir), (addr))))

#define pud_set_fixmap(addr)		((pud_t *)set_fixmap_offset(FIX_PUD, addr))
#define pud_set_fixmap_offset(pgd, addr)	pud_set_fixmap(pud_offset_phys(pgd, addr))
#define pud_clear_fixmap()		clear_fixmap(FIX_PUD)

#define pgd_page(pgd)		pfn_to_page(__phys_to_pfn(__pgd_to_phys(pgd)))

/* use ONLY for statically allocated translation tables */
#define pud_offset_kimg(dir,addr)	((pud_t *)__phys_to_kimg(pud_offset_phys((dir), (addr))))

#else

#define pgd_page_paddr(pgd)	({ BUILD_BUG(); 0;})

/* Match pud_offset folding in <asm-generic/pgtable-nopud.h> */
#define pud_set_fixmap(addr)		NULL
#define pud_set_fixmap_offset(pgdp, addr)	((pud_t *)pgdp)
#define pud_clear_fixmap()

#define pud_offset_kimg(dir,addr)	((pud_t *)dir)

#endif /* CONFIG_PGTABLE_LEVELS > 3 */

#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd_val(pgd))

/* to find an entry in a page-table-directory */
#define pgd_index(addr)		(((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

#define pgd_offset_raw(pgd, addr)	((pgd) + pgd_index(addr))

#define pgd_offset(mm, addr)	(pgd_offset_raw((mm)->pgd, (addr)))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)

#define pgd_set_fixmap(addr)	((pgd_t *)set_fixmap_offset(FIX_PGD, addr))
#define pgd_clear_fixmap()	clear_fixmap(FIX_PGD)

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
			      PTE_PROT_NONE | PTE_VALID | PTE_WRITE;
	/* preserve the hardware dirty information */
	if (pte_hw_dirty(pte))
		pte = pte_mkdirty(pte);
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
}

static inline int pud_devmap(pud_t pud)
{
	return 0;
}

static inline int pgd_devmap(pgd_t pgd)
{
	return 0;
}
#endif

/*
 * Atomic pte/pmd modifications.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
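/*
 * Clear the access flag with a cmpxchg() loop so that a racing hardware
 * update of another bit in the pte (e.g. DBM clearing PTE_RDONLY) is not
 * lost.
 */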
static inline int __ptep_test_and_clear_young(pte_t *ptep)
{
	pte_t old_pte, pte;

	pte = READ_ONCE(*ptep);
	do {
		old_pte = pte;
		pte = pte_mkold(pte);
		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
					       pte_val(old_pte), pte_val(pte));
	} while (pte_val(pte) != pte_val(old_pte));

	return pte_young(pte);
}

static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	return __ptep_test_and_clear_young(ptep);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	int young = ptep_test_and_clear_young(vma, address, ptep);

	if (young) {
		/*
		 * We can elide the trailing DSB here since the worst that can
		 * happen is that a CPU continues to use the young entry in its
		 * TLB and we mistakenly reclaim the associated page. The
		 * window for such an event is bounded by the next
		 * context-switch, which provides a DSB to complete the TLB
		 * invalidation.
		 */
		flush_tlb_page_nosync(vma, address);
	}

	return young;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	return __pte(xchg_relaxed(&pte_val(*ptep), 0));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	return pte_pmd(ptep_get_and_clear(mm, address, (pte_t *)pmdp));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * ptep_set_wrprotect - mark read-only while transferring potential hardware
 * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit.
 */
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
	pte_t old_pte, pte;

	pte = READ_ONCE(*ptep);
	do {
		old_pte = pte;
		/*
		 * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY
		 * clear), set the PTE_DIRTY bit.
		 */
		if (pte_hw_dirty(pte))
			pte = pte_mkdirty(pte);
		pte = pte_wrprotect(pte);
		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
					       pte_val(old_pte), pte_val(pte));
	} while (pte_val(pte) != pte_val(old_pte));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
}

#define pmdp_establish pmdp_establish
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
				   unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	return __pmd(xchg_relaxed(&pmd_val(*pmdp), pmd_val(pmd)));
}
#endif

/*
 * Encode and decode a swap entry:
 *	bits 0-1:	present (must be zero)
 *	bits 2-7:	swap type
 *	bits 8-57:	swap offset
 *	bit  58:	PTE_PROT_NONE (must be zero)
 */
#define __SWP_TYPE_SHIFT	2
#define __SWP_TYPE_BITS		6
#define __SWP_OFFSET_BITS	50
#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
#define __SWP_OFFSET_MASK	((1UL << __SWP_OFFSET_BITS) - 1)

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)		(((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })

/*
 * Ensure that there are not more swap files than can be encoded in the kernel
 * PTEs.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

extern int kern_addr_valid(unsigned long addr);

#include <asm-generic/pgtable.h>

static inline void pgtable_cache_init(void) { }

/*
 * On AArch64, the cache coherency is handled via the set_pte_at() function.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep)
{
	/*
	 * We don't do anything here, so there's a very small chance of
	 * us retaking a user fault which we just fixed up. The alternative
	 * is doing a dsb(ishst), but that penalises the fastpath.
	 */
}

#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)

#define kc_vaddr_to_offset(v)	((v) & ~VA_START)
#define kc_offset_to_vaddr(o)	((o) | VA_START)

#ifdef CONFIG_ARM64_PA_BITS_52
#define phys_to_ttbr(addr)	(((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52)
#else
#define phys_to_ttbr(addr)	(addr)
#endif
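
/*
 * For 52-bit PAs, TTBRn_ELx expects base address bits [51:48] in register
 * bits [5:2] (BADDR); the shift by 46 above moves them into place.
 */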

#endif /* !__ASSEMBLY__ */

#endif /* __ASM_PGTABLE_H */