/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_HASH_H
#define _ASM_POWERPC_BOOK3S_64_HASH_H
#ifdef __KERNEL__

#include <asm/asm-const.h>
/*
 * Common bits between 4K and 64K pages in a linux-style PTE.
 * Additional bits may be defined in pgtable-hash64-*.h
 */
#define H_PTE_NONE_MASK		_PAGE_HPTEFLAGS

#ifdef CONFIG_PPC_64K_PAGES
#include <asm/book3s/64/hash-64k.h>
#else
#include <asm/book3s/64/hash-4k.h>
#endif

/* Bits to set in a PMD/PUD/PGD entry valid bit */
#define HASH_PMD_VAL_BITS		(0x8000000000000000UL)
#define HASH_PUD_VAL_BITS		(0x8000000000000000UL)
#define HASH_PGD_VAL_BITS		(0x8000000000000000UL)

/*
 * Size of EA range mapped by our pagetables.
 */
#define H_PGTABLE_EADDR_SIZE	(H_PTE_INDEX_SIZE + H_PMD_INDEX_SIZE + \
				 H_PUD_INDEX_SIZE + H_PGD_INDEX_SIZE + PAGE_SHIFT)
#define H_PGTABLE_RANGE		(ASM_CONST(1) << H_PGTABLE_EADDR_SIZE)
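/*
 * Worked example (illustrative; the index sizes are assumptions taken from
 * a 64K-page config): with H_PTE_INDEX_SIZE = 8, H_PMD_INDEX_SIZE = 10,
 * H_PUD_INDEX_SIZE = 10, H_PGD_INDEX_SIZE = 8 and PAGE_SHIFT = 16,
 * H_PGTABLE_EADDR_SIZE = 8 + 10 + 10 + 8 + 16 = 52 bits, so
 * H_PGTABLE_RANGE covers 2^52 bytes = 4PB.
 */
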
/*
 * Top 2 bits are ignored in page table walk.
 */
#define EA_MASK			(~(0xcUL << 60))
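/*
 * Example (illustrative): masking drops the top quadrant bits, e.g.
 * 0xc008000000000000UL & EA_MASK == 0x0008000000000000UL.
 */
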
/*
 * We store the slot details in the second half of page table.
 * Increase the pud level table so that hugetlb ptes can be stored
 * at pud level.
 */
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PPC_64K_PAGES)
#define H_PUD_CACHE_INDEX	(H_PUD_INDEX_SIZE + 1)
#else
#define H_PUD_CACHE_INDEX	(H_PUD_INDEX_SIZE)
#endif
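/*
 * Illustrative note (assumes H_PUD_INDEX_SIZE = 10, as in hash-64k.h):
 * raising the cache index from 10 to 11 doubles the PUD table allocation,
 * leaving the second half free for the slot details mentioned above.
 */
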
/*
 * +------------------------------+
 * |                              |
 * |                              |
 * |                              |
 * +------------------------------+  Kernel virtual map end (0xc00e000000000000)
 * |                              |
 * |                              |
 * |      512TB/16TB of vmemmap   |
 * |                              |
 * |                              |
 * +------------------------------+  Kernel vmemmap start
 * |                              |
 * |      512TB/16TB of IO map    |
 * |                              |
 * +------------------------------+  Kernel IO map start
 * |                              |
 * |      512TB/16TB of vmap      |
 * |                              |
 * +------------------------------+  Kernel virt start (0xc008000000000000)
 * |                              |
 * |                              |
 * |                              |
 * +------------------------------+  Kernel linear (0xc.....)
 */

#define H_VMALLOC_START		H_KERN_VIRT_START
#define H_VMALLOC_SIZE		H_KERN_MAP_SIZE
#define H_VMALLOC_END		(H_VMALLOC_START + H_VMALLOC_SIZE)

#define H_KERN_IO_START		H_VMALLOC_END
#define H_KERN_IO_SIZE		H_KERN_MAP_SIZE
#define H_KERN_IO_END		(H_KERN_IO_START + H_KERN_IO_SIZE)

#define H_VMEMMAP_START		H_KERN_IO_END
#define H_VMEMMAP_SIZE		H_KERN_MAP_SIZE
#define H_VMEMMAP_END		(H_VMEMMAP_START + H_VMEMMAP_SIZE)
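/*
 * Worked example (illustrative; assumes the 64K-page layout from the
 * diagram above, i.e. H_KERN_VIRT_START = 0xc008000000000000 and
 * H_KERN_MAP_SIZE = 512TB = 1UL << 49):
 *
 *	vmalloc: 0xc008000000000000 - 0xc00a000000000000
 *	IO map : 0xc00a000000000000 - 0xc00c000000000000
 *	vmemmap: 0xc00c000000000000 - 0xc00e000000000000 (kernel map end)
 */
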
#define NON_LINEAR_REGION_ID(ea)	((((unsigned long)ea - H_KERN_VIRT_START) >> REGION_SHIFT) + 2)

/*
 * Region IDs
 */
#define USER_REGION_ID		0
#define LINEAR_MAP_REGION_ID	1
#define VMALLOC_REGION_ID	NON_LINEAR_REGION_ID(H_VMALLOC_START)
#define IO_REGION_ID		NON_LINEAR_REGION_ID(H_KERN_IO_START)
#define VMEMMAP_REGION_ID	NON_LINEAR_REGION_ID(H_VMEMMAP_START)
#define INVALID_REGION_ID	(VMEMMAP_REGION_ID + 1)
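/*
 * Worked example (illustrative; assumes H_KERN_MAP_SIZE == 1UL << REGION_SHIFT,
 * i.e. each kernel map occupies exactly one region):
 *
 *	VMALLOC_REGION_ID = ((H_VMALLOC_START - H_KERN_VIRT_START)
 *				>> REGION_SHIFT) + 2 = 2
 *	IO_REGION_ID      = 3
 *	VMEMMAP_REGION_ID = 4
 *	INVALID_REGION_ID = 5
 */
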
/*
 * Defines the address of the vmemmap area, in its own region on
 * hash table CPUs.
 */
#ifdef CONFIG_PPC_MM_SLICES
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
#endif /* CONFIG_PPC_MM_SLICES */

/* PTEIDX nibble */
#define _PTEIDX_SECONDARY	0x8
#define _PTEIDX_GROUP_IX	0x7
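/*
 * Example (illustrative): a PTEIDX nibble of 0xb selects the secondary
 * hash bucket (0xb & _PTEIDX_SECONDARY) with group index 3
 * (0xb & _PTEIDX_GROUP_IX).
 */
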
#define H_PMD_BAD_BITS		(PTE_TABLE_SIZE-1)
#define H_PUD_BAD_BITS		(PMD_TABLE_SIZE-1)

#ifndef __ASSEMBLY__
static inline int get_region_id(unsigned long ea)
{
	int region_id;
	int id = (ea >> 60UL);

	if (id == 0)
		return USER_REGION_ID;

	if (id != (PAGE_OFFSET >> 60))
		return INVALID_REGION_ID;

	if (ea < H_KERN_VIRT_START)
		return LINEAR_MAP_REGION_ID;

	BUILD_BUG_ON(NON_LINEAR_REGION_ID(H_VMALLOC_START) != 2);

	region_id = NON_LINEAR_REGION_ID(ea);
	return region_id;
}
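/*
 * Usage sketch (illustrative; addresses other than the macros are made up):
 *
 *	get_region_id(0x0000123456789000UL) == USER_REGION_ID
 *	get_region_id(PAGE_OFFSET)          == LINEAR_MAP_REGION_ID
 *	get_region_id(H_VMALLOC_START)      == VMALLOC_REGION_ID
 *	get_region_id(0xd000000000000000UL) == INVALID_REGION_ID
 */
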
#define	hash__pmd_bad(pmd)		(pmd_val(pmd) & H_PMD_BAD_BITS)
#define	hash__pud_bad(pud)		(pud_val(pud) & H_PUD_BAD_BITS)
static inline int hash__pgd_bad(pgd_t pgd)
{
	return (pgd_val(pgd) == 0);
}
#ifdef CONFIG_STRICT_KERNEL_RWX
extern void hash__mark_rodata_ro(void);
extern void hash__mark_initmem_nx(void);
#endif

extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep, unsigned long pte, int huge);
extern unsigned long htab_convert_pte_flags(unsigned long pteflags);
/* Atomic PTE updates */
static inline unsigned long hash__pte_update(struct mm_struct *mm,
					 unsigned long addr,
					 pte_t *ptep, unsigned long clr,
					 unsigned long set,
					 int huge)
{
	__be64 old_be, tmp_be;
	unsigned long old;

	__asm__ __volatile__(
	"1:	ldarx	%0,0,%3		# pte_update\n\
	and.	%1,%0,%6\n\
	bne-	1b \n\
	andc	%1,%0,%4 \n\
	or	%1,%1,%7\n\
	stdcx.	%1,0,%3 \n\
	bne-	1b"
	: "=&r" (old_be), "=&r" (tmp_be), "=m" (*ptep)
	: "r" (ptep), "r" (cpu_to_be64(clr)), "m" (*ptep),
	  "r" (cpu_to_be64(H_PAGE_BUSY)), "r" (cpu_to_be64(set))
	: "cc" );
	/* huge pages use the old page table lock */
	if (!huge)
		assert_pte_locked(mm, addr);

	old = be64_to_cpu(old_be);
	if (old & H_PAGE_HASHPTE)
		hpte_need_flush(mm, addr, ptep, old, huge);

	return old;
}
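/*
 * Illustrative C sketch of the ldarx/stdcx. sequence above (a paraphrase,
 * not the implementation; try_store_conditional() is a hypothetical
 * stand-in for stdcx., which retries if the reservation is lost):
 *
 *	do {
 *		old_be = *ptep;					// ldarx
 *	} while ((old_be & cpu_to_be64(H_PAGE_BUSY)) ||		// and. ; bne- 1b
 *		 !try_store_conditional(ptep,
 *			(old_be & ~cpu_to_be64(clr)) |		// andc
 *			cpu_to_be64(set)));			// or ; stdcx. ; bne- 1b
 */
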
/* Set the dirty and/or accessed bits atomically in a linux PTE; this
 * function doesn't need to flush the hash entry.
 */
static inline void hash__ptep_set_access_flags(pte_t *ptep, pte_t entry)
{
	__be64 old, tmp, val, mask;

	mask = cpu_to_be64(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_READ | _PAGE_WRITE |
			   _PAGE_EXEC | _PAGE_SOFT_DIRTY);

	val = pte_raw(entry) & mask;

	__asm__ __volatile__(
	"1:	ldarx	%0,0,%4\n\
		and.	%1,%0,%6\n\
		bne-	1b \n\
		or	%0,%3,%0\n\
		stdcx.	%0,0,%4\n\
		bne-	1b"
	:"=&r" (old), "=&r" (tmp), "=m" (*ptep)
	:"r" (val), "r" (ptep), "m" (*ptep), "r" (cpu_to_be64(H_PAGE_BUSY))
	:"cc");
}
static inline int hash__pte_same(pte_t pte_a, pte_t pte_b)
{
	return (((pte_raw(pte_a) ^ pte_raw(pte_b)) & ~cpu_to_be64(_PAGE_HPTEFLAGS)) == 0);
}

static inline int hash__pte_none(pte_t pte)
{
	return (pte_val(pte) & ~H_PTE_NONE_MASK) == 0;
}
unsigned long pte_get_hash_gslot(unsigned long vpn, unsigned long shift,
		int ssize, real_pte_t rpte, unsigned int subpg_index);

/* This low level function performs the actual PTE insertion.
 * Setting the PTE depends on the MMU type and other factors. It's
 * a horrible mess that I'm not going to try to clean up now but
 * I'm keeping it in one place rather than spread around
 */
static inline void hash__set_pte_at(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep, pte_t pte, int percpu)
{
	/*
	 * Anything else just stores the PTE normally. That covers all 64-bit
	 * cases, and 32-bit non-hash with 32-bit PTEs.
	 */
	*ptep = pte;
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
				   pmd_t *pmdp, unsigned long old_pmd);
#else
static inline void hpte_do_hugepage_flush(struct mm_struct *mm,
					  unsigned long addr, pmd_t *pmdp,
					  unsigned long old_pmd)
{
	WARN(1, "%s called with THP disabled\n", __func__);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

int hash__map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot);
extern int __meminit hash__vmemmap_create_mapping(unsigned long start,
						  unsigned long page_size,
						  unsigned long phys);
extern void hash__vmemmap_remove_mapping(unsigned long start,
					 unsigned long page_size);

int hash__create_section_mapping(unsigned long start, unsigned long end, int nid);
int hash__remove_section_mapping(unsigned long start, unsigned long end);

#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_BOOK3S_64_HASH_H */