/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 *
 * Derived from MIPS:
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H
#include <linux/compiler.h>
#include <asm/addrspace.h>
#include <asm/page.h>
#include <asm/pgtable-bits.h>
#if CONFIG_PGTABLE_LEVELS == 2
#include <asm-generic/pgtable-nopmd.h>
#elif CONFIG_PGTABLE_LEVELS == 3
#include <asm-generic/pgtable-nopud.h>
#else
#include <asm-generic/pgtable-nop4d.h>
#endif
#if CONFIG_PGTABLE_LEVELS == 2
#define PGDIR_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT - 3))
#elif CONFIG_PGTABLE_LEVELS == 3
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT - 3))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PGDIR_SHIFT	(PMD_SHIFT + (PAGE_SHIFT - 3))
#elif CONFIG_PGTABLE_LEVELS == 4
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT - 3))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PUD_SHIFT	(PMD_SHIFT + (PAGE_SHIFT - 3))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PGDIR_SHIFT	(PUD_SHIFT + (PAGE_SHIFT - 3))
#endif
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

#define VA_BITS		(PGDIR_SHIFT + (PAGE_SHIFT - 3))

#define PTRS_PER_PGD	(PAGE_SIZE >> 3)
#if CONFIG_PGTABLE_LEVELS > 3
#define PTRS_PER_PUD	(PAGE_SIZE >> 3)
#endif
#if CONFIG_PGTABLE_LEVELS > 2
#define PTRS_PER_PMD	(PAGE_SIZE >> 3)
#endif
#define PTRS_PER_PTE	(PAGE_SIZE >> 3)

#define USER_PTRS_PER_PGD	((TASK_SIZE64 / PGDIR_SIZE) ? (TASK_SIZE64 / PGDIR_SIZE) : 1)
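
/*
 * A worked example, assuming the default 16KB page size (PAGE_SHIFT == 14):
 * each level then indexes PAGE_SHIFT - 3 == 11 bits, so every table holds
 * 2048 entries; with CONFIG_PGTABLE_LEVELS == 3 this gives PMD_SHIFT == 25,
 * PGDIR_SHIFT == 36 and VA_BITS == 47.
 */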
#ifndef __ASSEMBLY__

#include <linux/mm_types.h>
#include <linux/mmzone.h>
#include <asm/fixmap.h>
#include <asm/sparsemem.h>

struct mm_struct;
struct vm_area_struct;
/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];

#define ZERO_PAGE(vaddr)	virt_to_page(empty_zero_page)
/*
 * TLB refill handlers may also map the vmalloc area into xkvrange.
 * Avoid the first couple of pages so NULL pointer dereferences will
 * still reliably trap.
 */
#define MODULES_VADDR	(vm_map_base + PCI_IOSIZE + (2 * PAGE_SIZE))
#define MODULES_END	(MODULES_VADDR + SZ_256M)

#ifdef CONFIG_KFENCE
#define KFENCE_AREA_SIZE	(((CONFIG_KFENCE_NUM_OBJECTS + 1) * 2 + 2) * PAGE_SIZE)
#else
#define KFENCE_AREA_SIZE	0
#endif
#define VMALLOC_START	MODULES_END

#ifndef CONFIG_KASAN
#define VMALLOC_END	\
	(vm_map_base +	\
	 min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits)) - PMD_SIZE - VMEMMAP_SIZE - KFENCE_AREA_SIZE)
#else
#define VMALLOC_END	\
	(vm_map_base +	\
	 min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits) / 2) - PMD_SIZE - VMEMMAP_SIZE - KFENCE_AREA_SIZE)
#endif

#define vmemmap		((struct page *)((VMALLOC_END + PMD_SIZE) & PMD_MASK))
#define VMEMMAP_END	((unsigned long)vmemmap + VMEMMAP_SIZE - 1)

#define KFENCE_AREA_START	(VMEMMAP_END + 1)
#define KFENCE_AREA_END		(KFENCE_AREA_START + KFENCE_AREA_SIZE - 1)
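
/*
 * Resulting layout sketch (informational only): the space above vm_map_base
 * holds the PCI I/O range, then modules (MODULES_VADDR..MODULES_END), then
 * vmalloc (VMALLOC_START..VMALLOC_END), followed by the PMD-aligned vmemmap
 * array and, when KFENCE is enabled, the KFENCE pool at the top.
 */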
#define ptep_get(ptep) READ_ONCE(*(ptep))
#define pmdp_get(pmdp) READ_ONCE(*(pmdp))
#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_ERROR(e) \
	pr_err("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#endif
#ifndef __PAGETABLE_PUD_FOLDED
#define pud_ERROR(e) \
	pr_err("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
#endif
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))

extern pte_t invalid_pte_table[PTRS_PER_PTE];
#ifndef __PAGETABLE_PUD_FOLDED

typedef struct { unsigned long pud; } pud_t;
#define pud_val(x)	((x).pud)
#define __pud(x)	((pud_t) { (x) })

extern pud_t invalid_pud_table[PTRS_PER_PUD];

/*
 * Empty pgd/p4d entries point to the invalid_pud_table.
 */
static inline int p4d_none(p4d_t p4d)
{
	return p4d_val(p4d) == (unsigned long)invalid_pud_table;
}

static inline int p4d_bad(p4d_t p4d)
{
	return p4d_val(p4d) & ~PAGE_MASK;
}

static inline int p4d_present(p4d_t p4d)
{
	return p4d_val(p4d) != (unsigned long)invalid_pud_table;
}

static inline pud_t *p4d_pgtable(p4d_t p4d)
{
	return (pud_t *)p4d_val(p4d);
}

static inline void set_p4d(p4d_t *p4d, p4d_t p4dval)
{
	WRITE_ONCE(*p4d, p4dval);
}

static inline void p4d_clear(p4d_t *p4dp)
{
	set_p4d(p4dp, __p4d((unsigned long)invalid_pud_table));
}

#define p4d_phys(p4d)		PHYSADDR(p4d_val(p4d))
#define p4d_page(p4d)		(pfn_to_page(p4d_phys(p4d) >> PAGE_SHIFT))
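
/*
 * Note (explanatory, not from the original source): non-leaf entries at this
 * level store the kernel virtual address of the next-level table, which is
 * why p4d_pgtable() casts p4d_val() directly to a pointer while p4d_phys()
 * has to translate it with PHYSADDR().
 */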
#endif

#ifndef __PAGETABLE_PMD_FOLDED

typedef struct { unsigned long pmd; } pmd_t;
#define pmd_val(x)	((x).pmd)
#define __pmd(x)	((pmd_t) { (x) })

extern pmd_t invalid_pmd_table[PTRS_PER_PMD];

/*
 * Empty pud entries point to the invalid_pmd_table.
 */
static inline int pud_none(pud_t pud)
{
	return pud_val(pud) == (unsigned long)invalid_pmd_table;
}

static inline int pud_bad(pud_t pud)
{
	return pud_val(pud) & ~PAGE_MASK;
}

static inline int pud_present(pud_t pud)
{
	return pud_val(pud) != (unsigned long)invalid_pmd_table;
}

static inline pmd_t *pud_pgtable(pud_t pud)
{
	return (pmd_t *)pud_val(pud);
}

static inline void set_pud(pud_t *pud, pud_t pudval)
{
	WRITE_ONCE(*pud, pudval);
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud((unsigned long)invalid_pmd_table));
}

#define pud_phys(pud)		PHYSADDR(pud_val(pud))
#define pud_page(pud)		(pfn_to_page(pud_phys(pud) >> PAGE_SHIFT))

#endif
/*
 * Empty pmd entries point to the invalid_pte_table.
 */
static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == (unsigned long)invalid_pte_table;
}

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_val(pmd) & ~PAGE_MASK);
}
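
/*
 * Note on pmd_present() below (explanatory, not from the original source):
 * a huge PMD is a leaf, so it cannot be compared against invalid_pte_table;
 * its present bits are checked directly, and _PAGE_PRESENT_INVALID keeps
 * pmd_present() true for an entry that pmd_mkinvalid() has temporarily made
 * invisible to the hardware TLB refill while it is being modified.
 */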
static inline int pmd_present(pmd_t pmd)
{
	if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
		return !!(pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PRESENT_INVALID));

	return pmd_val(pmd) != (unsigned long)invalid_pte_table;
}
static inline void set_pmd(pmd_t *pmd, pmd_t pmdval)
{
	WRITE_ONCE(*pmd, pmdval);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd((unsigned long)invalid_pte_table));
}

#define pmd_phys(pmd)		PHYSADDR(pmd_val(pmd))

#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_page(pmd)		(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_page_vaddr(pmd)	pmd_val(pmd)

extern pmd_t mk_pmd(struct page *page, pgprot_t prot);
extern void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd);

#define pte_page(x)		pfn_to_page(pte_pfn(x))
#define pte_pfn(x)		((unsigned long)(((x).pte & _PFN_MASK) >> PFN_PTE_SHIFT))
#define pfn_pte(pfn, prot)	__pte(((pfn) << PFN_PTE_SHIFT) | pgprot_val(prot))
#define pfn_pmd(pfn, prot)	__pmd(((pfn) << PFN_PTE_SHIFT) | pgprot_val(prot))
/*
 * Initialize a new pgd / pud / pmd table with invalid pointers.
 */
extern void pgd_init(void *addr);
extern void pud_init(void *addr);
#define pud_init pud_init
extern void pmd_init(void *addr);
#define pmd_init pmd_init
extern void kernel_pte_init(void *addr);
#define kernel_pte_init kernel_pte_init
/*
 * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
 * are !pte_none() && !pte_present().
 *
 * Format of swap PTEs:
 *
 *   6 6 6 6 5 5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 3 3 3 3 3 3 3 3
 *   3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2
 *   <--------------------------- offset ---------------------------
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   --------------> E <--- type ---> <---------- zeroes ---------->
 *
 *   E is the exclusive marker that is not stored in swap entries.
 *   The zero'ed bits include _PAGE_PRESENT and _PAGE_PROTNONE.
 */
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{ pte_t pte; pte_val(pte) = ((type & 0x7f) << 16) | (offset << 24); return pte; }

#define __swp_type(x)		(((x).val >> 16) & 0x7f)
#define __swp_offset(x)		((x).val >> 24)
#define __swp_entry(type, offset) ((swp_entry_t) { pte_val(mk_swap_pte((type), (offset))) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
#define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) })
#define __swp_entry_to_pmd(x)	((pmd_t) { (x).val | _PAGE_HUGE })
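
/*
 * Worked example (illustrative only, derived from the macros above):
 * __swp_entry(1, 2) packs type 1 into bits 16..22 and offset 2 starting at
 * bit 24, i.e. the raw value (1UL << 16) | (2UL << 24); __swp_type() and
 * __swp_offset() recover 1 and 2 from that value.
 */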
static inline int pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	pte_val(pte) |= _PAGE_SWP_EXCLUSIVE;
	return pte;
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SWP_EXCLUSIVE;
	return pte;
}
extern void paging_init(void);

#define pte_none(pte)		(!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte)	(pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROTNONE))
#define pte_no_exec(pte)	(pte_val(pte) & _PAGE_NO_EXEC)
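
/*
 * Note on set_pte() below (an interpretation, not from the original source):
 * for global (kernel) mappings the DBAR hint appears intended to order the
 * page-table store against later accesses, so that another core's TLB refill
 * walking the shared kernel tables observes the new entry.
 */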
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	WRITE_ONCE(*ptep, pteval);

	if (pte_val(pteval) & _PAGE_GLOBAL)
		DBAR(0b11000); /* o_wrw = 0b11000 */
}
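
/*
 * Note on pte_clear() below (an interpretation, not from the original
 * source): only _PAGE_GLOBAL is preserved when clearing, most likely so that
 * the buddy entry of a paired TLB entry keeps a consistent global attribute
 * for kernel mappings.
 */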
static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t pte = ptep_get(ptep);

	pte_val(pte) &= _PAGE_GLOBAL;
	set_pte(ptep, pte);
}
#define PGD_T_LOG2	(__builtin_ffs(sizeof(pgd_t)) - 1)
#define PMD_T_LOG2	(__builtin_ffs(sizeof(pmd_t)) - 1)
#define PTE_T_LOG2	(__builtin_ffs(sizeof(pte_t)) - 1)

extern pgd_t swapper_pg_dir[];
extern pgd_t invalid_pg_dir[];
/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & (_PAGE_DIRTY | _PAGE_MODIFIED); }
static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_ACCESSED;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_MODIFIED);
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_MODIFIED;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) |= _PAGE_DIRTY;
	return pte;
}

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_MODIFIED)
		pte_val(pte) |= _PAGE_DIRTY;
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_DIRTY);
	return pte;
}

static inline int pte_huge(pte_t pte)	{ return pte_val(pte) & _PAGE_HUGE; }

static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_HUGE;
	return pte;
}
#if defined(CONFIG_ARCH_HAS_PTE_SPECIAL)
static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }
static inline pte_t pte_mkspecial(pte_t pte)	{ pte_val(pte) |= _PAGE_SPECIAL; return pte; }
#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */

static inline int pte_devmap(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_DEVMAP); }
static inline pte_t pte_mkdevmap(pte_t pte)	{ pte_val(pte) |= _PAGE_DEVMAP; return pte; }
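
/*
 * Note on pte_accessible() below (explanatory, not from the original source):
 * a PROT_NONE PTE may still be cached in remote TLBs while a flush is
 * pending, so it has to be treated as accessible until the flush completes.
 */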
#define pte_accessible pte_accessible
static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a)
{
	if (pte_val(a) & _PAGE_PRESENT)
		return true;

	if ((pte_val(a) & _PAGE_PROTNONE) &&
			atomic_read(&mm->tlb_flush_pending))
		return true;

	return false;
}
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
		     (pgprot_val(newprot) & ~_PAGE_CHG_MASK));
}
extern void __update_tlb(struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep);

static inline void update_mmu_cache_range(struct vm_fault *vmf,
		struct vm_area_struct *vma, unsigned long address,
		pte_t *ptep, unsigned int nr)
{
	for (;;) {
		__update_tlb(vma, address, ptep);
		if (--nr == 0)
			break;
		address += PAGE_SIZE;
		ptep++;
	}
}
#define update_mmu_cache(vma, addr, ptep) \
	update_mmu_cache_range(NULL, vma, addr, ptep, 1)

#define update_mmu_tlb_range(vma, addr, ptep, nr) \
	update_mmu_cache_range(NULL, vma, addr, ptep, nr)
static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
			unsigned long address, pmd_t *pmdp)
{
	__update_tlb(vma, address, (pte_t *)pmdp);
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return (pmd_val(pmd) & _PFN_MASK) >> PFN_PTE_SHIFT;
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/* We don't have hardware dirty/accessed bits, generic_pmdp_establish is fine. */
#define pmdp_establish generic_pmdp_establish

static inline int pmd_trans_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_HUGE) && pmd_present(pmd);
}
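
/*
 * Note on pmd_mkhuge() below (explanatory, not from the original source):
 * huge leaf entries keep their global attribute in the _PAGE_HGLOBAL bit
 * rather than _PAGE_GLOBAL, so the existing global bit is shifted up by
 * (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT) before _PAGE_HUGE is set.
 */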
static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) = (pmd_val(pmd) & ~(_PAGE_GLOBAL)) |
		((pmd_val(pmd) & _PAGE_GLOBAL) << (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT));
	pmd_val(pmd) |= _PAGE_HUGE;

	return pmd;
}
#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_WRITE);
}

static inline pmd_t pmd_mkwrite_novma(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_WRITE;
	if (pmd_val(pmd) & _PAGE_MODIFIED)
		pmd_val(pmd) |= _PAGE_DIRTY;
	return pmd;
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_WRITE | _PAGE_DIRTY);
	return pmd;
}

#define pmd_dirty pmd_dirty
static inline int pmd_dirty(pmd_t pmd)
{
	return !!(pmd_val(pmd) & (_PAGE_DIRTY | _PAGE_MODIFIED));
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_DIRTY | _PAGE_MODIFIED);
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_MODIFIED;
	if (pmd_val(pmd) & _PAGE_WRITE)
		pmd_val(pmd) |= _PAGE_DIRTY;
	return pmd;
}

#define pmd_young pmd_young
static inline int pmd_young(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	pmd_val(pmd) &= ~_PAGE_ACCESSED;
	return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_ACCESSED;
	return pmd;
}

static inline int pmd_devmap(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_DEVMAP);
}

static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_DEVMAP;
	return pmd;
}

static inline struct page *pmd_page(pmd_t pmd)
{
	if (pmd_trans_huge(pmd))
		return pfn_to_page(pmd_pfn(pmd));

	return pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmd_val(pmd) = (pmd_val(pmd) & _HPAGE_CHG_MASK) |
				(pgprot_val(newprot) & ~_HPAGE_CHG_MASK);
	return pmd;
}
static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_PRESENT_INVALID;
	pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY | _PAGE_PROTNONE);

	return pmd;
}
/*
 * The generic version pmdp_huge_get_and_clear uses a version of pmd_clear() with a
 * different prototype.
 */
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	pmd_t old = pmdp_get(pmdp);

	pmd_clear(pmdp);

	return old;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#ifdef CONFIG_NUMA_BALANCING
static inline long pte_protnone(pte_t pte)
{
	return (pte_val(pte) & _PAGE_PROTNONE);
}

static inline long pmd_protnone(pmd_t pmd)
{
	return (pmd_val(pmd) & _PAGE_PROTNONE);
}
#endif /* CONFIG_NUMA_BALANCING */
#define pmd_leaf(pmd)		((pmd_val(pmd) & _PAGE_HUGE) != 0)
#define pud_leaf(pud)		((pud_val(pud) & _PAGE_HUGE) != 0)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define pud_devmap(pud)		(0)
#define pgd_devmap(pgd)		(0)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
/*
 * We provide our own get_unmapped area to cope with the virtual aliasing
 * constraints placed on us by the cache architecture.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_PGTABLE_H */