#ifndef _ASM_POWERPC_BOOK3S_64_HASH_H
#define _ASM_POWERPC_BOOK3S_64_HASH_H
#ifdef __KERNEL__
/*
 * Common bits between 4K and 64K pages in a linux-style PTE.
 * Additional bits may be defined in pgtable-hash64-*.h
 *
 * Note: We only support user read/write permissions. The supervisor always
 * has full read/write access to pages above PAGE_OFFSET (pages below that
 * always use the user access permissions).
 *
 * We could create a separate kernel read-only mode if we used the 3 PP bit
 * combinations that newer processors provide, but we currently don't.
 */
#define H_PAGE_BUSY		0x00800	/* software: PTE & hash are busy */
#define H_PTE_NONE_MASK		_PAGE_HPTEFLAGS
#define H_PAGE_F_GIX_SHIFT	57
#define H_PAGE_F_GIX		(7ul << 57)	/* HPTE index within HPTEG */
#define H_PAGE_F_SECOND		(1ul << 60)	/* HPTE is in 2ndary HPTEG */
#define H_PAGE_HASHPTE		(1ul << 61)	/* PTE has associated HPTE */
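/*
 * Illustrative sketch (not a kernel API): given a linux PTE, the cached
 * hash-slot hint could be decoded like this:
 *
 *	unsigned long gix = (pte_val(pte) & H_PAGE_F_GIX) >> H_PAGE_F_GIX_SHIFT;
 *	int secondary = !!(pte_val(pte) & H_PAGE_F_SECOND);
 *
 * i.e. bits 57-59 hold the slot index within the HPTE group, and bit 60
 * records whether the entry landed in the secondary group.
 */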
#ifdef CONFIG_PPC_64K_PAGES
#include <asm/book3s/64/hash-64k.h>
#else
#include <asm/book3s/64/hash-4k.h>
#endif
/*
 * Size of EA range mapped by our pagetables.
 */
#define H_PGTABLE_EADDR_SIZE	(H_PTE_INDEX_SIZE + H_PMD_INDEX_SIZE + \
				 H_PUD_INDEX_SIZE + H_PGD_INDEX_SIZE + PAGE_SHIFT)
#define H_PGTABLE_RANGE		(ASM_CONST(1) << H_PGTABLE_EADDR_SIZE)
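/*
 * Worked example, assuming the 4K-page index sizes from hash-4k.h of the
 * same vintage (H_PTE_INDEX_SIZE = 9, H_PMD_INDEX_SIZE = 7,
 * H_PUD_INDEX_SIZE = 9, H_PGD_INDEX_SIZE = 9, PAGE_SHIFT = 12):
 * 9 + 7 + 9 + 9 + 12 = 46 bits of effective address, so
 * H_PGTABLE_RANGE = 1UL << 46 = 64TB.
 */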
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_PPC_64K_PAGES)
/*
 * Only with hash 64K do we need to use the second half of the PMD page
 * table to store a pointer to the deposited pgtable_t.
 */
#define H_PMD_CACHE_INDEX	(H_PMD_INDEX_SIZE + 1)
#else
#define H_PMD_CACHE_INDEX	H_PMD_INDEX_SIZE
#endif
/*
 * Define the address range of the kernel non-linear virtual area.
 */
#define H_KERN_VIRT_START	ASM_CONST(0xD000000000000000)
#define H_KERN_VIRT_SIZE	ASM_CONST(0x0000100000000000)
/*
 * The vmalloc space starts at the beginning of that region, and
 * occupies half of it on hash CPUs and a quarter of it on Book3E
 * (we keep a quarter for the virtual memmap).
 */
#define H_VMALLOC_START	H_KERN_VIRT_START
#define H_VMALLOC_SIZE	(H_KERN_VIRT_SIZE >> 1)
#define H_VMALLOC_END	(H_VMALLOC_START + H_VMALLOC_SIZE)
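/*
 * Worked out: H_KERN_VIRT_SIZE is 1UL << 44 (16TB), so the vmalloc space
 * above is the 8TB range 0xD000000000000000 - 0xD000080000000000.
 */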
/*
 * Region IDs
 */
#define REGION_SHIFT		60UL
#define REGION_MASK		(0xfUL << REGION_SHIFT)
#define REGION_ID(ea)		(((unsigned long)(ea)) >> REGION_SHIFT)
#define VMALLOC_REGION_ID	(REGION_ID(H_VMALLOC_START))
#define KERNEL_REGION_ID	(REGION_ID(PAGE_OFFSET))
#define VMEMMAP_REGION_ID	(0xfUL)	/* Server only */
#define USER_REGION_ID		(0UL)
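/*
 * Example: an effective address is classified purely by its top nibble
 * (the addresses below are illustrative):
 *
 *	REGION_ID(0xD000000000001000UL) == 0xD	(vmalloc)
 *	REGION_ID(0xC000000000000000UL) == 0xC	(kernel linear map)
 *	REGION_ID(0x0000123400005000UL) == 0x0	(user)
 */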
/*
 * Defines the address of the vmemmap area, in its own region on
 * hash table CPUs.
 */
#define H_VMEMMAP_BASE		(VMEMMAP_REGION_ID << REGION_SHIFT)
#ifdef CONFIG_PPC_MM_SLICES
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
#endif /* CONFIG_PPC_MM_SLICES */
/* PTEIDX nibble */
#define _PTEIDX_SECONDARY	0x8
#define _PTEIDX_GROUP_IX	0x7
#define H_PMD_BAD_BITS		(PTE_TABLE_SIZE-1)
#define H_PUD_BAD_BITS		(PMD_TABLE_SIZE-1)
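/*
 * Rationale: a valid PMD entry holds a pointer to a PTE page, which is
 * aligned to PTE_TABLE_SIZE, so any of the low (PTE_TABLE_SIZE - 1) bits
 * being set marks the entry as bad. The PUD check is the same one level up.
 */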
#ifndef __ASSEMBLY__
#define hash__pmd_bad(pmd)	(pmd_val(pmd) & H_PMD_BAD_BITS)
#define hash__pud_bad(pud)	(pud_val(pud) & H_PUD_BAD_BITS)
static inline int hash__pgd_bad(pgd_t pgd)
{
	return (pgd_val(pgd) == 0);
}
extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep, unsigned long pte, int huge);
extern unsigned long htab_convert_pte_flags(unsigned long pteflags);
/* Atomic PTE updates */
static inline unsigned long hash__pte_update(struct mm_struct *mm,
					     unsigned long addr,
					     pte_t *ptep, unsigned long clr,
					     unsigned long set,
					     int huge)
{
	__be64 old_be, tmp_be;
	unsigned long old;

	__asm__ __volatile__(
	"1:	ldarx	%0,0,%3		# pte_update\n\
	and.	%1,%0,%6\n\
	bne-	1b \n\
	andc	%1,%0,%4 \n\
	or	%1,%1,%7\n\
	stdcx.	%1,0,%3 \n\
	bne-	1b"
	: "=&r" (old_be), "=&r" (tmp_be), "=m" (*ptep)
	: "r" (ptep), "r" (cpu_to_be64(clr)), "m" (*ptep),
	  "r" (cpu_to_be64(H_PAGE_BUSY)), "r" (cpu_to_be64(set))
	: "cc" );
	/* huge pages use the old page table lock */
	if (!huge)
		assert_pte_locked(mm, addr);

	old = be64_to_cpu(old_be);
	if (old & H_PAGE_HASHPTE)
		hpte_need_flush(mm, addr, ptep, old, huge);

	return old;
}
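/*
 * Usage sketch (illustrative, not a declaration from this file): a
 * get-and-clear built on the primitive above would look like
 *
 *	old = hash__pte_update(mm, addr, ptep, ~0ul, 0, 0);
 *
 * which spins while H_PAGE_BUSY is set, atomically clears every bit,
 * and flushes the stale HPTE if the old PTE had one attached.
 */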
/* Set the dirty and/or accessed bits atomically in a linux PTE; this
 * function doesn't need to flush the hash entry.
 */
static inline void hash__ptep_set_access_flags(pte_t *ptep, pte_t entry)
{
	__be64 old, tmp, val, mask;

	mask = cpu_to_be64(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_READ | _PAGE_WRITE |
			   _PAGE_EXEC | _PAGE_SOFT_DIRTY);

	val = pte_raw(entry) & mask;

	__asm__ __volatile__(
	"1:	ldarx	%0,0,%4\n\
		and.	%1,%0,%6\n\
		bne-	1b \n\
		or	%0,%3,%0\n\
		stdcx.	%0,0,%4\n\
		bne-	1b"
	:"=&r" (old), "=&r" (tmp), "=m" (*ptep)
	:"r" (val), "r" (ptep), "m" (*ptep), "r" (cpu_to_be64(H_PAGE_BUSY))
	:"cc");
}
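/*
 * Note on the missing flush: the bits set above only ever make the PTE
 * more permissive, so an existing HPTE stays (conservatively) valid; a
 * later access needing the new permission simply takes a fault and the
 * HPTE is updated then.
 */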
static inline int hash__pte_same(pte_t pte_a, pte_t pte_b)
{
	return (((pte_raw(pte_a) ^ pte_raw(pte_b)) & ~cpu_to_be64(_PAGE_HPTEFLAGS)) == 0);
}
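/*
 * Note the ~_PAGE_HPTEFLAGS mask above: two PTEs compare equal even if
 * their hash bookkeeping bits (H_PAGE_HASHPTE, slot hints, ...) differ,
 * since those track HPTE state rather than the mapping itself.
 */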
static inline int hash__pte_none(pte_t pte)
{
	return (pte_val(pte) & ~H_PTE_NONE_MASK) == 0;
}
/* This low level function performs the actual PTE insertion.
 * Setting the PTE depends on the MMU type and other factors. It's
 * a horrible mess that I'm not going to try to clean up now but
 * I'm keeping it in one place rather than spread around.
 */
static inline void hash__set_pte_at(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep, pte_t pte, int percpu)
{
	/*
	 * Anything else just stores the PTE normally. That covers all 64-bit
	 * cases, and 32-bit non-hash with 32-bit PTEs.
	 */
	*ptep = pte;
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
				   pmd_t *pmdp, unsigned long old_pmd);
#else
static inline void hpte_do_hugepage_flush(struct mm_struct *mm,
					  unsigned long addr, pmd_t *pmdp,
					  unsigned long old_pmd)
{
	WARN(1, "%s called with THP disabled\n", __func__);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
extern int hash__map_kernel_page(unsigned long ea, unsigned long pa,
				 unsigned long flags);
extern int __meminit hash__vmemmap_create_mapping(unsigned long start,
						  unsigned long page_size,
						  unsigned long phys);
extern void hash__vmemmap_remove_mapping(unsigned long start,
					 unsigned long page_size);

int hash__create_section_mapping(unsigned long start, unsigned long end);
int hash__remove_section_mapping(unsigned long start, unsigned long end);
#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_BOOK3S_64_HASH_H */