/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 */
#ifndef _ASM_RISCV_PGTABLE_H
#define _ASM_RISCV_PGTABLE_H

#include <linux/mmzone.h>
#include <linux/sizes.h>

#include <asm/pgtable-bits.h>
#ifndef CONFIG_MMU
#define KERNEL_LINK_ADDR	PAGE_OFFSET
#define KERN_VIRT_SIZE		(UL(-1))
#else

#define ADDRESS_SPACE_END	(UL(-1))

#ifdef CONFIG_64BIT
/* Leave 2GB for kernel and BPF at the end of the address space */
#define KERNEL_LINK_ADDR	(ADDRESS_SPACE_END - SZ_2G + 1)
#else
#define KERNEL_LINK_ADDR	PAGE_OFFSET
#endif
/* Number of entries in the page global directory */
#define PTRS_PER_PGD	(PAGE_SIZE / sizeof(pgd_t))
/* Number of entries in the page table */
#define PTRS_PER_PTE	(PAGE_SIZE / sizeof(pte_t))
/*
 * Half of the kernel address space (1/4 of the entries of the page global
 * directory) is for the direct mapping.
 */
#define KERN_VIRT_SIZE		((PTRS_PER_PGD / 2 * PGDIR_SIZE) / 2)

#define VMALLOC_SIZE		(KERN_VIRT_SIZE >> 1)
#define VMALLOC_END		PAGE_OFFSET
#define VMALLOC_START		(PAGE_OFFSET - VMALLOC_SIZE)
#define BPF_JIT_REGION_SIZE	(SZ_128M)
#ifdef CONFIG_64BIT
#define BPF_JIT_REGION_START	(BPF_JIT_REGION_END - BPF_JIT_REGION_SIZE)
#define BPF_JIT_REGION_END	(MODULES_END)
#else
#define BPF_JIT_REGION_START	(PAGE_OFFSET - BPF_JIT_REGION_SIZE)
#define BPF_JIT_REGION_END	(VMALLOC_END)
#endif
/* Modules always live before the kernel */
#ifdef CONFIG_64BIT
/* This is used to define the end of the KASAN shadow region */
#define MODULES_LOWEST_VADDR	(KERNEL_LINK_ADDR - SZ_2G)
#define MODULES_VADDR		(PFN_ALIGN((unsigned long)&_end) - SZ_2G)
#define MODULES_END		(PFN_ALIGN((unsigned long)&_start))
#else
#define MODULES_VADDR		VMALLOC_START
#define MODULES_END		VMALLOC_END
#endif
/*
 * Roughly size the vmemmap space to be large enough to fit enough
 * struct pages to map half the virtual address space. Then
 * position vmemmap directly below the VMALLOC region.
 */
#define VA_BITS_SV32 32
#ifdef CONFIG_64BIT
#define VA_BITS_SV39 39
#define VA_BITS_SV48 48
#define VA_BITS_SV57 57

#define VA_BITS		(pgtable_l5_enabled ? \
				VA_BITS_SV57 : (pgtable_l4_enabled ? VA_BITS_SV48 : VA_BITS_SV39))
#else
#define VA_BITS		VA_BITS_SV32
#endif
#define VMEMMAP_SHIFT \
	(VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)
#define VMEMMAP_SIZE	BIT(VMEMMAP_SHIFT)
#define VMEMMAP_END	VMALLOC_START
#define VMEMMAP_START	(VMALLOC_START - VMEMMAP_SIZE)
/*
 * Define vmemmap for pfn_to_page & page_to_pfn calls. Needed if the kernel
 * is configured with CONFIG_SPARSEMEM_VMEMMAP enabled.
 */
#define vmemmap		((struct page *)VMEMMAP_START - (phys_ram_base >> PAGE_SHIFT))
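
/*
 * With this placement, pfn_to_page()/page_to_pfn() reduce to pointer
 * arithmetic; an illustrative sketch (not the generic implementation):
 *
 *	struct page *page = vmemmap + pfn;	// pfn_to_page(pfn)
 *	unsigned long pfn = page - vmemmap;	// page_to_pfn(page)
 *
 * Biasing vmemmap by (phys_ram_base >> PAGE_SHIFT) makes the entry for the
 * first page of RAM land at VMEMMAP_START.
 */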

#define PCI_IO_SIZE	SZ_16M
#define PCI_IO_END	VMEMMAP_START
#define PCI_IO_START	(PCI_IO_END - PCI_IO_SIZE)
#define FIXADDR_TOP	PCI_IO_START
#ifdef CONFIG_64BIT
#define MAX_FDT_SIZE	PMD_SIZE
#define FIX_FDT_SIZE	(MAX_FDT_SIZE + SZ_2M)
#define FIXADDR_SIZE	(PMD_SIZE + FIX_FDT_SIZE)
#else
#define MAX_FDT_SIZE	PGDIR_SIZE
#define FIX_FDT_SIZE	MAX_FDT_SIZE
#define FIXADDR_SIZE	(PGDIR_SIZE + FIX_FDT_SIZE)
#endif
#define FIXADDR_START	(FIXADDR_TOP - FIXADDR_SIZE)

#endif /* CONFIG_MMU */
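
/*
 * Resulting MMU=y layout, from low to high kernel virtual addresses (a
 * rough sketch derived from the definitions above; exact region sizes
 * depend on the paging mode):
 *
 *	FIXADDR_START    .. FIXADDR_TOP : fixmap (including the early FDT)
 *	PCI_IO_START     .. PCI_IO_END  : PCI I/O space
 *	VMEMMAP_START    .. VMEMMAP_END : struct page array
 *	VMALLOC_START    .. VMALLOC_END : vmalloc/ioremap space
 *	PAGE_OFFSET      ..             : linear (direct) mapping of RAM
 *	MODULES_VADDR    .. MODULES_END : modules and BPF (64-bit only)
 *	KERNEL_LINK_ADDR ..             : kernel image, top 2GB (64-bit only)
 */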
#ifndef __ASSEMBLY__

#include <asm/page.h>
#include <asm/tlbflush.h>
#include <linux/mm_types.h>
#include <asm/compat.h>

#define __page_val_to_pfn(_val)  (((_val) & _PAGE_PFN_MASK) >> _PAGE_PFN_SHIFT)
#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>
#define VA_USER_SV39 (UL(1) << (VA_BITS_SV39 - 1))
#define VA_USER_SV48 (UL(1) << (VA_BITS_SV48 - 1))
#define VA_USER_SV57 (UL(1) << (VA_BITS_SV57 - 1))

#define MMAP_VA_BITS_64 ((VA_BITS >= VA_BITS_SV48) ? VA_BITS_SV48 : VA_BITS)
#define MMAP_MIN_VA_BITS_64 (VA_BITS_SV39)
#define MMAP_VA_BITS (is_compat_task() ? VA_BITS_SV32 : MMAP_VA_BITS_64)
#define MMAP_MIN_VA_BITS (is_compat_task() ? VA_BITS_SV32 : MMAP_MIN_VA_BITS_64)
#else
#include <asm/pgtable-32.h>
#endif /* CONFIG_64BIT */

#include <linux/page_table_check.h>
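
/*
 * For XIP (execute-in-place) kernels, the writable data section is copied
 * from ROM to RAM at boot. XIP_FIXUP() rewrites a pointer that falls inside
 * the ROM copy of .data so that it refers to the live RAM copy instead;
 * addresses outside that window pass through unchanged.
 */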
#ifdef CONFIG_XIP_KERNEL
#define XIP_FIXUP(addr) ({						\
	extern char _sdata[], _start[], _end[];				\
	uintptr_t __rom_start_data = CONFIG_XIP_PHYS_ADDR		\
				+ (uintptr_t)&_sdata - (uintptr_t)&_start; \
	uintptr_t __rom_end_data = CONFIG_XIP_PHYS_ADDR			\
				+ (uintptr_t)&_end - (uintptr_t)&_start; \
	uintptr_t __a = (uintptr_t)(addr);				\
	(__a >= __rom_start_data && __a < __rom_end_data) ?		\
		__a - __rom_start_data + CONFIG_PHYS_RAM_BASE : __a;	\
	})
#else
#define XIP_FIXUP(addr)		(addr)
#endif /* CONFIG_XIP_KERNEL */
struct pt_alloc_ops {
	pte_t *(*get_pte_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pte)(uintptr_t va);
#ifndef __PAGETABLE_PMD_FOLDED
	pmd_t *(*get_pmd_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pmd)(uintptr_t va);
	pud_t *(*get_pud_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pud)(uintptr_t va);
	p4d_t *(*get_p4d_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_p4d)(uintptr_t va);
#endif
};

extern struct pt_alloc_ops pt_ops __meminitdata;
#ifdef CONFIG_MMU
/* Number of PGD entries that a user-mode program can use */
#define USER_PTRS_PER_PGD   (TASK_SIZE / PGDIR_SIZE)
/* Page protection bits */
#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER)

#define PAGE_NONE		__pgprot(_PAGE_PROT_NONE | _PAGE_READ)
#define PAGE_READ		__pgprot(_PAGE_BASE | _PAGE_READ)
#define PAGE_WRITE		__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE)
#define PAGE_EXEC		__pgprot(_PAGE_BASE | _PAGE_EXEC)
#define PAGE_READ_EXEC		__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
#define PAGE_WRITE_EXEC		__pgprot(_PAGE_BASE | _PAGE_READ |	\
					 _PAGE_EXEC | _PAGE_WRITE)

#define PAGE_COPY		PAGE_READ
#define PAGE_COPY_EXEC		PAGE_READ_EXEC
#define PAGE_SHARED		PAGE_WRITE
#define PAGE_SHARED_EXEC	PAGE_WRITE_EXEC
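
/*
 * PAGE_COPY* deliberately alias the read-only protections: private
 * (copy-on-write) mappings are installed without _PAGE_WRITE, so the first
 * write traps and the fault handler duplicates the page before making the
 * private copy writable.
 */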
#define _PAGE_KERNEL		(_PAGE_READ \
				| _PAGE_WRITE \
				| _PAGE_PRESENT \
				| _PAGE_ACCESSED \
				| _PAGE_DIRTY \
				| _PAGE_GLOBAL)

#define PAGE_KERNEL		__pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_READ	__pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
#define PAGE_KERNEL_EXEC	__pgprot(_PAGE_KERNEL | _PAGE_EXEC)
#define PAGE_KERNEL_READ_EXEC	__pgprot((_PAGE_KERNEL & ~_PAGE_WRITE) \
					 | _PAGE_EXEC)

#define PAGE_TABLE		__pgprot(_PAGE_TABLE)

#define _PAGE_IOREMAP	((_PAGE_KERNEL & ~_PAGE_MTMASK) | _PAGE_IO)
#define PAGE_KERNEL_IO		__pgprot(_PAGE_IOREMAP)
extern pgd_t swapper_pg_dir[];
extern pgd_t trampoline_pg_dir[];
extern pgd_t early_pg_dir[];
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_present(pmd_t pmd)
{
	/*
	 * Checking for _PAGE_LEAF is needed too because:
	 * when splitting a THP, split_huge_page() will temporarily clear
	 * the present bit; in this situation, pmd_present() and
	 * pmd_trans_huge() still need to return true.
	 */
	return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE | _PAGE_LEAF));
}
#else
static inline int pmd_present(pmd_t pmd)
{
	return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
}
#endif
static inline int pmd_none(pmd_t pmd)
{
	return (pmd_val(pmd) == 0);
}
static inline int pmd_bad(pmd_t pmd)
{
	return !pmd_present(pmd) || (pmd_val(pmd) & _PAGE_LEAF);
}
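
/*
 * Per the RISC-V privileged spec, a PTE with any of R/W/X set is a leaf;
 * _PAGE_LEAF is (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC). A pmd is therefore
 * "bad" if it is absent, or if it is a leaf where a pointer to a next-level
 * page table was expected.
 */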
#define pmd_leaf	pmd_leaf
static inline bool pmd_leaf(pmd_t pmd)
{
	return pmd_present(pmd) && (pmd_val(pmd) & _PAGE_LEAF);
}
static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	WRITE_ONCE(*pmdp, pmd);
}
static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}
static inline pgd_t pfn_pgd(unsigned long pfn, pgprot_t prot)
{
	unsigned long prot_val = pgprot_val(prot);

	ALT_THEAD_PMA(prot_val);

	return __pgd((pfn << _PAGE_PFN_SHIFT) | prot_val);
}
static inline unsigned long _pgd_pfn(pgd_t pgd)
{
	return __page_val_to_pfn(pgd_val(pgd));
}
static inline struct page *pmd_page(pmd_t pmd)
{
	return pfn_to_page(__page_val_to_pfn(pmd_val(pmd)));
}
static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)pfn_to_virt(__page_val_to_pfn(pmd_val(pmd)));
}
static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}
#ifdef CONFIG_RISCV_ISA_SVNAPOT
#include <asm/cpufeature.h>

static __always_inline bool has_svnapot(void)
{
	return riscv_has_extension_likely(RISCV_ISA_EXT_SVNAPOT);
}

static inline unsigned long pte_napot(pte_t pte)
{
	return pte_val(pte) & _PAGE_NAPOT;
}
static inline pte_t pte_mknapot(pte_t pte, unsigned int order)
{
	int pos = order - 1 + _PAGE_PFN_SHIFT;
	unsigned long napot_bit = BIT(pos);
	unsigned long napot_mask = ~GENMASK(pos, _PAGE_PFN_SHIFT);

	return __pte((pte_val(pte) & napot_mask) | napot_bit | _PAGE_NAPOT);
}
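
/*
 * Worked example (illustrative, assuming 4K base pages and
 * _PAGE_PFN_SHIFT == 10): a 64K NAPOT mapping has order 4, so
 * pos = 4 - 1 + 10 = 13. napot_mask clears PTE bits 13..10 (the low four
 * PPN bits) and napot_bit then sets bit 13, yielding ppn[3:0] = 0b1000,
 * the Svnapot encoding for a naturally aligned 16-page region.
 */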
#else

static __always_inline bool has_svnapot(void) { return false; }

static inline unsigned long pte_napot(pte_t pte)
{
	return 0;
}

#endif /* CONFIG_RISCV_ISA_SVNAPOT */
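
/*
 * For a NAPOT pte, the low PPN bits encode the region size (0b10...0, see
 * pte_mknapot() above), so the lowest set bit of the raw PFN is the NAPOT
 * marker. pte_pfn() below clears it with res & (res - 1), recovering the
 * PFN of the first page of the region.
 */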
/* Yields the page frame number (PFN) of a page table entry */
static inline unsigned long pte_pfn(pte_t pte)
{
	unsigned long res = __page_val_to_pfn(pte_val(pte));

	if (has_svnapot() && pte_napot(pte))
		res = res & (res - 1UL);

	return res;
}

#define pte_page(x)	pfn_to_page(pte_pfn(x))
/* Constructs a page table entry */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
	unsigned long prot_val = pgprot_val(prot);

	ALT_THEAD_PMA(prot_val);

	return __pte((pfn << _PAGE_PFN_SHIFT) | prot_val);
}

#define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)
static inline int pte_present(pte_t pte)
{
	return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
}
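
/*
 * A pte is "accessible" not only when it is present, but also while it is
 * PROT_NONE with a TLB flush still pending: until that flush completes,
 * stale translations for the old pte may remain in some TLB, so callers
 * must assume the page can still be reached through it.
 */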
#define pte_accessible pte_accessible
static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a)
{
	if (pte_val(a) & _PAGE_PRESENT)
		return true;

	if ((pte_val(a) & _PAGE_PROT_NONE) &&
	    atomic_read(&mm->tlb_flush_pending))
		return true;

	return false;
}
static inline int pte_none(pte_t pte)
{
	return (pte_val(pte) == 0);
}

static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & _PAGE_WRITE;
}

static inline int pte_exec(pte_t pte)
{
	return pte_val(pte) & _PAGE_EXEC;
}

static inline int pte_user(pte_t pte)
{
	return pte_val(pte) & _PAGE_USER;
}

static inline int pte_huge(pte_t pte)
{
	return pte_present(pte) && (pte_val(pte) & _PAGE_LEAF);
}

static inline int pte_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & _PAGE_ACCESSED;
}

static inline int pte_special(pte_t pte)
{
	return pte_val(pte) & _PAGE_SPECIAL;
}
#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
static inline int pte_devmap(pte_t pte)
{
	return pte_val(pte) & _PAGE_DEVMAP;
}
#endif

/* static inline pte_t pte_rdprotect(pte_t pte) */
static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_WRITE));
}

/* static inline pte_t pte_mkread(pte_t pte) */

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_WRITE);
}
/* static inline pte_t pte_mkexec(pte_t pte) */

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_DIRTY));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_ACCESSED));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

static inline pte_t pte_mkdevmap(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DEVMAP);
}
static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte;
}

#ifdef CONFIG_RISCV_ISA_SVNAPOT
#define pte_leaf_size(pte)	(pte_napot(pte) ?			\
				 napot_cont_size(napot_cont_order(pte)) :\
				 PAGE_SIZE)
#endif
#ifdef CONFIG_NUMA_BALANCING
/*
 * See the comment in include/asm-generic/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE)) == _PAGE_PROT_NONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif /* CONFIG_NUMA_BALANCING */
/* Modify page protection bits */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	unsigned long newprot_val = pgprot_val(newprot);

	ALT_THEAD_PMA(newprot_val);

	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | newprot_val);
}
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd " PTE_FMT ".\n", __FILE__, __LINE__, pgd_val(e))
/* Commit new configuration to MMU hardware */
static inline void update_mmu_cache_range(struct vm_fault *vmf,
		struct vm_area_struct *vma, unsigned long address,
		pte_t *ptep, unsigned int nr)
{
	asm goto(ALTERNATIVE("nop", "j %l[svvptc]", 0, RISCV_ISA_EXT_SVVPTC, 1)
		 : : : : svvptc);

	/*
	 * The kernel assumes that TLBs don't cache invalid entries, but
	 * in RISC-V, SFENCE.VMA specifies an ordering constraint, not a
	 * cache flush; it is necessary even after writing invalid entries.
	 * Relying on flush_tlb_fix_spurious_fault would suffice, but
	 * the extra traps reduce performance. So, eagerly SFENCE.VMA.
	 */
	while (nr--)
		local_flush_tlb_page(address + nr * PAGE_SIZE);

svvptc:;
	/*
	 * Svvptc guarantees that the new valid pte will be visible within
	 * a bounded timeframe, so when the uarch does not cache invalid
	 * entries, we don't have to do anything.
	 */
}
#define update_mmu_cache(vma, addr, ptep) \
	update_mmu_cache_range(NULL, vma, addr, ptep, 1)

#define update_mmu_tlb_range(vma, addr, ptep, nr) \
	update_mmu_cache_range(NULL, vma, addr, ptep, nr)
static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp)
{
	pte_t *ptep = (pte_t *)pmdp;

	update_mmu_cache(vma, address, ptep);
}
#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	return pte_val(pte_a) == pte_val(pte_b);
}
/*
 * Certain architectures need to do special things when PTEs within
 * a page table are directly modified. Thus, the following hook is
 * made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	WRITE_ONCE(*ptep, pteval);
}
void flush_icache_pte(struct mm_struct *mm, pte_t pte);
static inline void __set_pte_at(struct mm_struct *mm, pte_t *ptep, pte_t pteval)
{
	if (pte_present(pteval) && pte_exec(pteval))
		flush_icache_pte(mm, pteval);

	set_pte(ptep, pteval);
}
#define PFN_PTE_SHIFT		_PAGE_PFN_SHIFT
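
/*
 * set_ptes() below installs 'nr' consecutive ptes for one folio: each
 * iteration writes a pte and then advances the encoded PFN by adding
 * 1 << _PAGE_PFN_SHIFT, so pte N + 1 maps the page physically adjacent to
 * the one mapped by pte N.
 */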
static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
		pte_t *ptep, pte_t pteval, unsigned int nr)
{
	page_table_check_ptes_set(mm, ptep, pteval, nr);

	for (;;) {
		__set_pte_at(mm, ptep, pteval);
		if (--nr == 0)
			break;
		ptep++;
		pte_val(pteval) += 1 << _PAGE_PFN_SHIFT;
	}
}
#define set_ptes set_ptes
static inline void pte_clear(struct mm_struct *mm,
		unsigned long addr, pte_t *ptep)
{
	__set_pte_at(mm, ptep, __pte(0));
}
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS	/* defined in mm/pgtable.c */
extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
				 pte_t *ptep, pte_t entry, int dirty);
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG	/* defined in mm/pgtable.c */
extern int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long address,
				     pte_t *ptep);
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
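
/*
 * The pte is cleared with an atomic xchg rather than a read-modify-write
 * sequence so that a concurrent hardware update of the A/D bits cannot be
 * lost: whatever the hardware set is captured in the returned pte.
 */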
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	pte_t pte = __pte(atomic_long_xchg((atomic_long_t *)ptep, 0));

	page_table_check_pte_clear(mm, pte);

	return pte;
}
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pte_t *ptep)
{
	atomic_long_and(~(unsigned long)_PAGE_WRITE, (atomic_long_t *)ptep);
}
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	/*
	 * This comment is borrowed from x86, but applies equally to RISC-V:
	 *
	 * Clearing the accessed bit without a TLB flush
	 * doesn't cause data corruption. [ It could cause incorrect
	 * page aging and the (mistaken) reclaim of hot pages, but the
	 * chance of that should be relatively low. ]
	 *
	 * So as a performance optimization don't flush the TLB when
	 * clearing the accessed bit, it will eventually be flushed by
	 * a context switch or a VM operation anyway. [ In the rare
	 * event of it not getting flushed for a long time the delay
	 * shouldn't really matter because there's no real memory
	 * pressure for swapout to react to. ]
	 */
	return ptep_test_and_clear_young(vma, address, ptep);
}
#define pgprot_nx pgprot_nx
static inline pgprot_t pgprot_nx(pgprot_t _prot)
{
	return __pgprot(pgprot_val(_prot) & ~_PAGE_EXEC);
}
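
/*
 * _PAGE_MTMASK selects the page-based memory type (Svpbmt, or the T-Head
 * equivalent patched in via ALT_THEAD_PMA): pgprot_noncached() below
 * selects the strongly-ordered I/O type (_PAGE_IO), pgprot_writecombine()
 * the weakly-ordered non-cacheable type (_PAGE_NOCACHE).
 */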
#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot &= ~_PAGE_MTMASK;
	prot |= _PAGE_IO;

	return __pgprot(prot);
}
#define pgprot_writecombine pgprot_writecombine
static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot &= ~_PAGE_MTMASK;
	prot |= _PAGE_NOCACHE;

	return __pgprot(prot);
}
/*
 * THP functions
 */
static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}
static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	return pmd;
}

static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) & ~(_PAGE_PRESENT|_PAGE_PROT_NONE));
}
#define __pmd_to_phys(pmd)  (__page_val_to_pfn(pmd_val(pmd)) << PAGE_SHIFT)

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return ((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT);
}
#define __pud_to_phys(pud)  (__page_val_to_pfn(pud_val(pud)) << PAGE_SHIFT)

#define pud_pfn pud_pfn
static inline unsigned long pud_pfn(pud_t pud)
{
	return ((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT);
}
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
}
#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return pte_write(pmd_pte(pmd));
}

#define pud_write pud_write
static inline int pud_write(pud_t pud)
{
	return pte_write(pud_pte(pud));
}

#define pmd_dirty pmd_dirty
static inline int pmd_dirty(pmd_t pmd)
{
	return pte_dirty(pmd_pte(pmd));
}

#define pmd_young pmd_young
static inline int pmd_young(pmd_t pmd)
{
	return pte_young(pmd_pte(pmd));
}

static inline int pmd_user(pmd_t pmd)
{
	return pte_user(pmd_pte(pmd));
}
static inline pmd_t pmd_mkold(pmd_t pmd)
{
	return pte_pmd(pte_mkold(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	return pte_pmd(pte_mkyoung(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkwrite_novma(pmd_t pmd)
{
	return pte_pmd(pte_mkwrite_novma(pmd_pte(pmd)));
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	return pte_pmd(pte_wrprotect(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	return pte_pmd(pte_mkclean(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	return pte_pmd(pte_mkdirty(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
	return pte_pmd(pte_mkdevmap(pmd_pte(pmd)));
}
static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
				pmd_t *pmdp, pmd_t pmd)
{
	page_table_check_pmd_set(mm, pmdp, pmd);
	return __set_pte_at(mm, (pte_t *)pmdp, pmd_pte(pmd));
}

static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
				pud_t *pudp, pud_t pud)
{
	page_table_check_pud_set(mm, pudp, pud);
	return __set_pte_at(mm, (pte_t *)pudp, pud_pte(pud));
}
#ifdef CONFIG_PAGE_TABLE_CHECK
static inline bool pte_user_accessible_page(pte_t pte)
{
	return pte_present(pte) && pte_user(pte);
}

static inline bool pmd_user_accessible_page(pmd_t pmd)
{
	return pmd_leaf(pmd) && pmd_user(pmd);
}

static inline bool pud_user_accessible_page(pud_t pud)
{
	return pud_leaf(pud) && pud_user(pud);
}
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_leaf(pmd);
}
#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
}
#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address, pmd_t *pmdp)
{
	return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
}
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	pmd_t pmd = __pmd(atomic_long_xchg((atomic_long_t *)pmdp, 0));

	page_table_check_pmd_clear(mm, pmd);

	return pmd;
}
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
}
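
/*
 * pmdp_establish() atomically installs a new huge pmd and returns the old
 * one; the single xchg ensures a concurrent hardware A/D-bit update of the
 * old entry cannot be lost between the read and the write.
 */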
#define pmdp_establish pmdp_establish
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
				   unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	page_table_check_pmd_set(vma->vm_mm, pmdp, pmd);
	return __pmd(atomic_long_xchg((atomic_long_t *)pmdp, pmd_val(pmd)));
}
#define pmdp_collapse_flush pmdp_collapse_flush
extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
/*
 * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
 * are !pte_none() && !pte_present().
 *
 * Format of swap PTE:
 *	bit            0:	_PAGE_PRESENT (zero)
 *	bit       1 to 3:	_PAGE_LEAF (zero)
 *	bit            5:	_PAGE_PROT_NONE (zero)
 *	bit            6:	exclusive marker
 *	bits      7 to 11:	swap type
 *	bits 12 to XLEN-1:	swap offset
 */
#define __SWP_TYPE_SHIFT	7
#define __SWP_TYPE_BITS		5
#define __SWP_TYPE_MASK		((1UL << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

#define MAX_SWAPFILES_CHECK()	\
	BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

#define __swp_type(x)	(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)	((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type, offset) ((swp_entry_t) \
	{ (((type) & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT) | \
	  ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
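
/*
 * Worked example (illustrative values): __swp_entry(3, 0x1234) places the
 * type in bits 11..7 and the offset in bits XLEN-1..12, i.e.
 * (3 << 7) | (0x1234 << 12); __swp_type() and __swp_offset() invert this by
 * shifting and masking. Bit 0 (_PAGE_PRESENT) stays clear, so the result is
 * never mistaken for a present mapping.
 */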
static inline int pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SWP_EXCLUSIVE);
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_SWP_EXCLUSIVE);
}
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
#define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) })
#define __swp_entry_to_pmd(swp) __pmd((swp).val)
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
/*
 * In the RV64 Linux scheme, we give the user half of the virtual-address space
 * and give the kernel the other (upper) half.
 */
#ifdef CONFIG_64BIT
#define KERN_VIRT_START	(-(BIT(VA_BITS)) + TASK_SIZE)
#else
#define KERN_VIRT_START	FIXADDR_START
#endif
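
/*
 * Derivation for the 64-bit case: TASK_SIZE is 2^(VA_BITS - 1), half the
 * address space, so KERN_VIRT_START = -2^VA_BITS + 2^(VA_BITS - 1)
 * = -2^(VA_BITS - 1): the sign-extended start of the upper half.
 */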
/*
 * Task size is:
 * -        0x9fc00000	(~2.5GB) for RV32.
 * -      0x4000000000	( 256GB) for RV64 using SV39 mmu
 * -    0x800000000000	( 128TB) for RV64 using SV48 mmu
 * - 0x100000000000000	(  64PB) for RV64 using SV57 mmu
 *
 * Note that PGDIR_SIZE must evenly divide TASK_SIZE since the "RISC-V
 * Instruction Set Manual Volume II: Privileged Architecture" states that
 * "load and store effective addresses, which are 64 bits, must have bits
 * 63-48 all equal to bit 47, or else a page-fault exception will occur."
 * Similarly for SV57, bits 63-57 must be equal to bit 56.
 */
#ifdef CONFIG_64BIT
#define TASK_SIZE_64	(PGDIR_SIZE * PTRS_PER_PGD / 2)
#define TASK_SIZE_MAX	LONG_MAX

#ifdef CONFIG_COMPAT
#define TASK_SIZE_32	(_AC(0x80000000, UL) - PAGE_SIZE)
#define TASK_SIZE	(is_compat_task() ? \
			 TASK_SIZE_32 : TASK_SIZE_64)
#else
#define TASK_SIZE	TASK_SIZE_64
#endif

#else
#define TASK_SIZE	FIXADDR_START
#endif
#else /* CONFIG_MMU */

#define PAGE_SHARED		__pgprot(0)
#define PAGE_KERNEL		__pgprot(0)
#define swapper_pg_dir		NULL
#define TASK_SIZE		_AC(-1, UL)
#define VMALLOC_START		_AC(0, UL)
#define VMALLOC_END		TASK_SIZE

#endif /* !CONFIG_MMU */
extern char _start[];
extern void *_dtb_early_va;
extern uintptr_t _dtb_early_pa;
#if defined(CONFIG_XIP_KERNEL) && defined(CONFIG_MMU)
#define dtb_early_va	(*(void **)XIP_FIXUP(&_dtb_early_va))
#define dtb_early_pa	(*(uintptr_t *)XIP_FIXUP(&_dtb_early_pa))
#else
#define dtb_early_va	_dtb_early_va
#define dtb_early_pa	_dtb_early_pa
#endif /* CONFIG_XIP_KERNEL */
extern u64 satp_mode;

void paging_init(void);
void misc_mem_init(void);
/*
 * ZERO_PAGE is a global shared page that is always zero,
 * used for zero-mapped memory areas, etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
#endif /* !__ASSEMBLY__ */

#endif /* _ASM_RISCV_PGTABLE_H */