/* arch/powerpc/include/asm/pgtable.h */
#ifndef _ASM_POWERPC_PGTABLE_H
#define _ASM_POWERPC_PGTABLE_H
#ifdef __KERNEL__

#ifndef __ASSEMBLY__
#include <asm/processor.h>		/* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>

struct mm_struct;
#ifdef CONFIG_DEBUG_VM
extern void assert_pte_locked(struct mm_struct *mm, unsigned long addr);
#else /* CONFIG_DEBUG_VM */
static inline void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
{
}
#endif /* !CONFIG_DEBUG_VM */

#endif /* !__ASSEMBLY__ */
#if defined(CONFIG_PPC64)
#  include <asm/pgtable-ppc64.h>
#else
#  include <asm/pgtable-ppc32.h>
#endif

#ifndef __ASSEMBLY__
/* Generic accessors to PTE bits */
static inline int pte_write(pte_t pte)		{ return pte_val(pte) & _PAGE_RW; }
static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte)		{ return pte_val(pte) & _PAGE_FILE; }
static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }
static inline int pte_present(pte_t pte)	{ return pte_val(pte) & _PAGE_PRESENT; }
static inline int pte_none(pte_t pte)		{ return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
static inline pgprot_t pte_pgprot(pte_t pte)	{ return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }
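
/* Editor's sketch (illustrative, not part of the original header): the
 * accessors above each test a single bit of the PTE value, so they compose
 * naturally into higher-level predicates. The helper name is hypothetical.
 */
static inline int pte_example_needs_writeback(pte_t pte)
{
	/* present, writable and dirty: contents must reach backing store */
	return pte_present(pte) && pte_write(pte) && pte_dirty(pte);
}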
/* Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * Even if PTEs can be unsigned long long, a PFN is always an unsigned
 * long for now.
 */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot) {
	return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
		     pgprot_val(pgprot)); }
static inline unsigned long pte_pfn(pte_t pte)	{
	return pte_val(pte) >> PTE_RPN_SHIFT; }
/* Keep these as macros to avoid include dependency mess */
#define pte_page(x)		pfn_to_page(pte_pfn(x))
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
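
/* Editor's sketch (hypothetical helper; assumes page_to_pfn() is visible
 * here via asm/page.h): mk_pte() packs a PFN into the RPN field of the
 * entry and pte_pfn() extracts it again, so the two round-trip.
 */
static inline int pte_example_pfn_roundtrip(struct page *page, pgprot_t prot)
{
	pte_t pte = mk_pte(page, prot);

	return pte_pfn(pte) == page_to_pfn(page);	/* always true */
}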
/* Generic modifiers for PTE bits */
static inline pte_t pte_wrprotect(pte_t pte) {
	pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_mkclean(pte_t pte) {
	pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_mkold(pte_t pte) {
	pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte) {
	pte_val(pte) |= _PAGE_RW; return pte; }
static inline pte_t pte_mkdirty(pte_t pte) {
	pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte) {
	pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkspecial(pte_t pte) {
	pte_val(pte) |= _PAGE_SPECIAL; return pte; }
static inline pte_t pte_mkhuge(pte_t pte) {
	return pte; }
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
	return pte;
}
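
/* Editor's sketch (hypothetical helper): the modifiers take and return the
 * PTE by value, so they chain freely. Downgrading an entry ahead of a
 * write-protect pass might look like this:
 */
static inline pte_t pte_example_downgrade(pte_t pte)
{
	/* drop write permission, then clear the accessed bit */
	return pte_mkold(pte_wrprotect(pte));
}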
/* Insert a PTE: the top-level function is out of line. It uses an inline
 * low-level function in the respective pgtable-* files.
 */
extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
		       pte_t pte);
/* This low-level function performs the actual PTE insertion.
 * Setting the PTE depends on the MMU type and other factors. It's
 * a horrible mess that I'm not going to try to clean up now but
 * I'm keeping it in one place rather than spread around.
 */
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, int percpu)
{
#if defined(CONFIG_PPC_STD_MMU_32) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
	/* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the
	 * helper pte_update() which does an atomic update. We need to do that
	 * because a concurrent invalidation can clear _PAGE_HASHPTE. If it's a
	 * per-CPU PTE such as a kmap_atomic, we do a simple update preserving
	 * the hash bits instead (ie, same as the non-SMP case).
	 */
	if (percpu)
		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
			      | (pte_val(pte) & ~_PAGE_HASHPTE));
	else
		pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));

#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
	/* Second case is 32-bit with 64-bit PTE. In this case, we
	 * can just store as long as we do the two halves in the right order
	 * with a barrier in between. This is possible because we take care,
	 * in the hash code, to pre-invalidate if the PTE was already hashed,
	 * which synchronizes us with any concurrent invalidation.
	 * In the percpu case, we also fall back to the simple update preserving
	 * the hash bits.
	 */
	if (percpu) {
		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
			      | (pte_val(pte) & ~_PAGE_HASHPTE));
		return;
	}
#if _PAGE_HASHPTE != 0
	if (pte_val(*ptep) & _PAGE_HASHPTE)
		flush_hash_entry(mm, ptep, addr);
#endif
	__asm__ __volatile__("\
		stw%U0%X0 %2,%0\n\
		eieio\n\
		stw%U0%X0 %L2,%1"
	: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
	: "r" (pte) : "memory");

#elif defined(CONFIG_PPC_STD_MMU_32)
	/* Third case is 32-bit hash table in UP mode. We need to preserve
	 * the _PAGE_HASHPTE bit since we may not have invalidated the previous
	 * translation in the hash yet (done in a subsequent flush_tlb_xxx()),
	 * so we need to keep track that this PTE needs invalidating.
	 */
	*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
		      | (pte_val(pte) & ~_PAGE_HASHPTE));

#else
	/* Anything else just stores the PTE normally. That covers all 64-bit
	 * cases, and 32-bit non-hash with 32-bit PTEs.
	 */
	*ptep = pte;
#endif
}
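
/* Editor's sketch (hypothetical; mirrors how the out-of-line set_pte_at()
 * is expected to use the helper above): an ordinary mapping passes
 * percpu == 0, so the SMP hash synchronization rules apply; only truly
 * per-CPU mappings such as kmap_atomic slots would pass 1.
 */
static inline void example_set_pte(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte)
{
	__set_pte_at(mm, addr, ptep, pte, 0);
}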
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
				 pte_t *ptep, pte_t entry, int dirty);
/*
 * Macros to alter the cache-control bits in a page protection value:
 * uncacheable (guarded), uncacheable write-combining, cacheable coherent,
 * and cacheable write-through.
 */

#define _PAGE_CACHE_CTL	(_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
			 _PAGE_WRITETHRU)

#define pgprot_noncached(prot)	  (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_NO_CACHE | _PAGE_GUARDED))

#define pgprot_noncached_wc(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_NO_CACHE))

#define pgprot_cached(prot)	  (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_COHERENT))

#define pgprot_cached_wthru(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_COHERENT | _PAGE_WRITETHRU))
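
/* Editor's sketch (hypothetical helper): a caller picks the variant that
 * matches the device. Framebuffers typically tolerate write-combining,
 * while MMIO registers need the strongest form, no-cache plus guarded.
 */
static inline pgprot_t example_io_prot(pgprot_t prot, int write_combine)
{
	return write_combine ? pgprot_noncached_wc(prot)
			     : pgprot_noncached(prot);
}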
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);
#define __HAVE_PHYS_MEM_ACCESS_PROT
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
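
/* Editor's sketch (hypothetical fault-path excerpt, kept as a comment since
 * struct vm_area_struct is not fully defined here): a read fault on an
 * untouched anonymous page can be served by the shared zero page, mapped
 * read-only so a later write still triggers copy-on-write:
 *
 *	entry = mk_pte(ZERO_PAGE(address), vma->vm_page_prot);
 *	entry = pte_wrprotect(entry);
 */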
extern pgd_t swapper_pg_dir[];

extern void paging_init(void);
/*
 * kern_addr_valid is intended to indicate whether an address is a valid
 * kernel address. Most 32-bit archs define it as always true (like this),
 * but most 64-bit archs actually perform a test. What should we do here?
 */
#define kern_addr_valid(addr)	(1)
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)	\
	remap_pfn_range(vma, vaddr, pfn, size, prot)
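
/* Editor's sketch (hypothetical driver .mmap handler excerpt, as a comment
 * for the same reason as above): the cache-control macros and
 * io_remap_pfn_range() are typically combined when exposing device memory
 * to userspace:
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	return io_remap_pfn_range(vma, vma->vm_start, pfn,
 *				  vma->vm_end - vma->vm_start,
 *				  vma->vm_page_prot);
 */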
#include <asm-generic/pgtable.h>


/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to ensure coherency between the i-cache and d-cache
 * for the page which has just been mapped in.
 * On machines which use an MMU hash table, we use this to put a
 * corresponding HPTE into the hash table ahead of time, instead of
 * waiting for the inevitable extra hash-table miss exception.
 */
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
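
/* Editor's sketch (hypothetical fault-path excerpt): the generic fault code
 * installs the new PTE and then calls the hook above, which on hash-MMU
 * machines preloads the HPTE so the first access does not fault again:
 *
 *	set_pte_at(mm, address, ptep, entry);
 *	update_mmu_cache(vma, address, entry);
 */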
extern int gup_hugepd(hugepd_t *hugepd, unsigned pdshift, unsigned long addr,
		      unsigned long end, int write, struct page **pages, int *nr);

#endif /* __ASSEMBLY__ */

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_PGTABLE_H */