/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_PGTABLE_H
#define _ASM_POWERPC_PGTABLE_H

#ifndef __ASSEMBLY__
#include <linux/mmdebug.h>
#include <linux/mmzone.h>
#include <asm/processor.h>		/* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/tlbflush.h>

struct mm_struct;

#endif /* !__ASSEMBLY__ */

#ifdef CONFIG_PPC_BOOK3S
#include <asm/book3s/pgtable.h>
#else
#include <asm/nohash/pgtable.h>
#endif /* !CONFIG_PPC_BOOK3S */

/*
 * Protection used for kernel text. We want the debuggers to be able to
 * set breakpoints anywhere, so don't write protect the kernel text
 * on platforms where such control is possible.
 */
#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) || \
	defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
#define PAGE_KERNEL_TEXT	PAGE_KERNEL_X
#else
#define PAGE_KERNEL_TEXT	PAGE_KERNEL_ROX
#endif
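
/*
 * Illustrative note (ours, not from the original header): with a debug
 * option such as CONFIG_KPROBES enabled, PAGE_KERNEL_TEXT resolves to
 * PAGE_KERNEL_X so breakpoint instructions can be patched into the text.
 * Kernel text would then be mapped along the lines of
 *
 *	map_kernel_page(vaddr, paddr, PAGE_KERNEL_TEXT);
 *
 * (whether a given platform routes text mappings through map_kernel_page()
 * is platform-specific).
 */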

/* Make module code happy. We don't set RO yet */
#define PAGE_KERNEL_EXEC	PAGE_KERNEL_X

/* Advertise special mapping type for AGP */
#define PAGE_AGP		(PAGE_KERNEL_NC)

#ifndef __ASSEMBLY__

#define PFN_PTE_SHIFT		PTE_RPN_SHIFT

void set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
		pte_t pte, unsigned int nr);
#define set_ptes set_ptes
#define update_mmu_cache(vma, addr, ptep) \
	update_mmu_cache_range(NULL, vma, addr, ptep, 1)
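
/*
 * Usage sketch (illustrative, not a definition from this file): generic mm
 * code installs 'nr' contiguous PTEs in a single call, e.g.
 *
 *	set_ptes(vma->vm_mm, addr, ptep, mk_pte(page, vma->vm_page_prot), nr);
 *
 * update_mmu_cache() is the single-entry wrapper invoked after a fault has
 * installed one PTE.
 */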

#ifndef MAX_PTRS_PER_PGD
#define MAX_PTRS_PER_PGD PTRS_PER_PGD
#endif

/* Keep these as macros to avoid include dependency mess */
#define pte_page(x)		pfn_to_page(pte_pfn(x))
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) & PTE_RPN_MASK) >> PTE_RPN_SHIFT;
}

/*
 * Select all bits except the pfn
 */
#define pte_pgprot pte_pgprot
static inline pgprot_t pte_pgprot(pte_t pte)
{
	unsigned long pte_flags;

	pte_flags = pte_val(pte) & ~PTE_RPN_MASK;
	return __pgprot(pte_flags);
}
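
/*
 * Illustrative identity (our gloss, not asserted by this header): pte_pfn()
 * keeps only the RPN field and pte_pgprot() keeps everything else, so a PTE
 * can be split and rebuilt losslessly:
 *
 *	pte_t rebuilt = pfn_pte(pte_pfn(pte), pte_pgprot(pte));
 */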

static inline pgprot_t pgprot_nx(pgprot_t prot)
{
	return pte_pgprot(pte_exprotect(__pte(pgprot_val(prot))));
}
#define pgprot_nx pgprot_nx
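
/*
 * Usage sketch (illustrative): pgprot_nx() strips execute permission from a
 * protection value, e.g. when mapping data that must never be executed:
 *
 *	pgprot_t prot = pgprot_nx(PAGE_KERNEL);
 */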

#ifndef pmd_page_vaddr
static inline const void *pmd_page_vaddr(pmd_t pmd)
{
	return __va(pmd_val(pmd) & ~PMD_MASKED_BITS);
}
#define pmd_page_vaddr pmd_page_vaddr
#endif

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[];
#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))

extern pgd_t swapper_pg_dir[];

extern void paging_init(void);
void poking_init(void);

extern unsigned long ioremap_bot;
extern const pgprot_t protection_map[16];

/* can we use this in kvm? */
unsigned long vmalloc_to_phys(void *vmalloc_addr);

void pgtable_cache_add(unsigned int shift);

#ifdef CONFIG_PPC32
void __init *early_alloc_pgtable(unsigned long size);
pte_t *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va);
#endif

#if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_PPC32)
void mark_initmem_nx(void);
#else
static inline void mark_initmem_nx(void) { }
#endif

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
			  pte_t *ptep, pte_t entry, int dirty);

pgprot_t __phys_mem_access_prot(unsigned long pfn, unsigned long size,
				pgprot_t vma_prot);

struct file;
static inline pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
					    unsigned long size, pgprot_t vma_prot)
{
	return __phys_mem_access_prot(pfn, size, vma_prot);
}
#define __HAVE_PHYS_MEM_ACCESS_PROT
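
/*
 * Note (our assumption about the generic hook, not stated in this file):
 * defining __HAVE_PHYS_MEM_ACCESS_PROT lets /dev/mem-style mappings ask the
 * architecture for the right cacheability, along the lines of
 *
 *	vma->vm_page_prot = phys_mem_access_prot(file, pfn, size,
 *						 vma->vm_page_prot);
 */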

void __update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);

/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to ensure coherency between the i-cache and d-cache
 * for the page which has just been mapped in.
 * On machines which use an MMU hash table, we use this to put a
 * corresponding HPTE into the hash table ahead of time, instead of
 * waiting for the inevitable extra hash-table miss exception.
 */
static inline void update_mmu_cache_range(struct vm_fault *vmf,
		struct vm_area_struct *vma, unsigned long address,
		pte_t *ptep, unsigned int nr)
{
	if ((mmu_has_feature(MMU_FTR_HPTE_TABLE) && !radix_enabled()) ||
	    (IS_ENABLED(CONFIG_PPC_E500) && IS_ENABLED(CONFIG_HUGETLB_PAGE)))
		__update_mmu_cache(vma, address, ptep);
}
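
/*
 * Reading of the condition above (our gloss): the preload is only useful on
 * hash-table MMUs, so it is skipped when the radix MMU is active; on e500
 * it is used to preload TLB entries for huge pages when hugetlbfs is
 * enabled.
 */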

/*
 * When used, PTE_FRAG_NR is defined in subarch pgtable.h,
 * so we are sure it is included when arriving here.
 */
#ifdef PTE_FRAG_NR
static inline void *pte_frag_get(mm_context_t *ctx)
{
	return ctx->pte_frag;
}

static inline void pte_frag_set(mm_context_t *ctx, void *p)
{
	ctx->pte_frag = p;
}
#else
#define PTE_FRAG_NR		1
#define PTE_FRAG_SIZE_SHIFT	PAGE_SHIFT
#define PTE_FRAG_SIZE		(1UL << PTE_FRAG_SIZE_SHIFT)

static inline void *pte_frag_get(mm_context_t *ctx)
{
	return NULL;
}

static inline void pte_frag_set(mm_context_t *ctx, void *p)
{
}
#endif
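
/*
 * Worked example (illustrative; the real values are per-subarch): a subarch
 * with 64K pages and PTE_FRAG_SIZE_SHIFT == 12 carves each page-table page
 * into PTE_FRAG_NR == 16 fragments of PTE_FRAG_SIZE == 4K, with
 * pte_frag_get()/pte_frag_set() caching the partially used fragment in the
 * mm context. The fallback above means one fragment per page, i.e.
 * fragments effectively disabled.
 */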

#define pmd_pgtable pmd_pgtable
static inline pgtable_t pmd_pgtable(pmd_t pmd)
{
	return (pgtable_t)pmd_page_vaddr(pmd);
}

#ifdef CONFIG_PPC64
int __meminit vmemmap_populated(unsigned long vmemmap_addr, int vmemmap_map_size);
bool altmap_cross_boundary(struct vmem_altmap *altmap, unsigned long start,
			   unsigned long page_size);
/*
 * mm/memory_hotplug.c:mhp_supports_memmap_on_memory goes into the details
 * of some of the restrictions. We don't check for PMD_SIZE because our
 * vmemmap allocation code can fall back correctly. The pageblock
 * alignment requirement is met using altmap->reserve blocks.
 */
#define arch_supports_memmap_on_memory arch_supports_memmap_on_memory
static inline bool arch_supports_memmap_on_memory(unsigned long vmemmap_size)
{
	if (!radix_enabled())
		return false;
	/*
	 * With 4K page size and 2M PMD_SIZE, we can align
	 * things better with memory block size value
	 * starting from 128MB. Hence align things with PMD_SIZE.
	 */
	if (IS_ENABLED(CONFIG_PPC_4K_PAGES))
		return IS_ALIGNED(vmemmap_size, PMD_SIZE);

	return true;
}
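
/*
 * Worked example (our arithmetic; assumes a 64-byte struct page): with 4K
 * pages, a 128MB memory block has 128MB / 4KB = 32768 pages and therefore
 * needs 32768 * 64 = 2MB of vmemmap, which is PMD_SIZE aligned, so
 * memmap_on_memory can be supported for such blocks on radix.
 */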

#endif /* CONFIG_PPC64 */

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_PGTABLE_H */