#ifndef __ASM_SH_PGTABLE_64_H
#define __ASM_SH_PGTABLE_64_H

/*
 * include/asm-sh/pgtable_64.h
 *
 * This file contains the functions and defines necessary to modify and use
 * the SuperH page table tree.
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003, 2004  Paul Mundt
 * Copyright (C) 2003, 2004  Richard Curnow
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>
/*
 * Error outputs.
 */
#define pte_ERROR(e) \
	printk("%s:%d: bad pte %016Lx.\n", __FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
/*
 * Table setting routines. Used within arch/mm only.
 */
#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)

static __inline__ void set_pte(pte_t *pteptr, pte_t pteval)
{
	unsigned long long x = ((unsigned long long) pteval.pte_low);
	unsigned long long *xp = (unsigned long long *) pteptr;

	/*
	 * Sign-extend based on NPHYS.
	 */
	*(xp) = (x & NPHYS_SIGN) ? (x | NPHYS_MASK) : x;
}
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
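
/*
 * A minimal worked example of the sign-extension above (editorial
 * sketch, assuming NPHYS == 32 so that NPHYS_SIGN is bit 31 and
 * NPHYS_MASK covers bits [63:32]; both are defined elsewhere in the
 * sh64 headers):
 */
#if 0	/* illustration only, not compiled */
	unsigned long long lo = 0x87654321ULL;		/* bit 31 set */
	unsigned long long stored = (lo & NPHYS_SIGN) ?
			(lo | NPHYS_MASK) : lo;	/* 0xffffffff87654321ULL */
#endif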
/*
 * PGD defines. Top level.
 */

/* To find an entry in a generic PGD. */
#define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define __pgd_offset(address)	pgd_index(address)
#define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))

/* To find an entry in a kernel PGD. */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)

#define __pud_offset(address)	(((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define __pmd_offset(address)	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
/*
 * PMD level access routines. Same notes as above.
 */
#define _PMD_EMPTY		0x0
/* Either the PMD is empty or present, it's not paged out */
#define pmd_present(pmd_entry)	(pmd_val(pmd_entry) & _PAGE_PRESENT)
#define pmd_clear(pmd_entry_p)	(set_pmd((pmd_entry_p), __pmd(_PMD_EMPTY)))
#define pmd_none(pmd_entry)	(pmd_val((pmd_entry)) == _PMD_EMPTY)
#define pmd_bad(pmd_entry) \
	((pmd_val(pmd_entry) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)

#define pmd_page_vaddr(pmd_entry) \
	((unsigned long) __va(pmd_val(pmd_entry) & PAGE_MASK))

#define pmd_page(pmd) \
	(virt_to_page(pmd_val(pmd)))
/* PMD to PTE dereferencing */
#define pte_index(address) \
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

#define __pte_offset(address)	pte_index(address)

#define pte_offset_kernel(dir, addr) \
	((pte_t *) ((pmd_val(*(dir))) & PAGE_MASK) + pte_index((addr)))

#define pte_offset_map(dir, addr)	pte_offset_kernel(dir, addr)
#define pte_unmap(pte)			do { } while (0)
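
/*
 * A minimal usage sketch of the walk macros above (editorial addition;
 * the pud/pmd steps come from the folded-level helpers pulled in via
 * asm/pgtable.h):
 */
#if 0	/* illustration only, not compiled */
	pgd_t *pgd = pgd_offset_k(addr);	/* kernel walk uses &init_mm */
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);
	pte_t *pte = pte_offset_kernel(pmd, addr);
#endif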
#ifndef __ASSEMBLY__

/*
 * PTEL coherent flags.
 * See Chapter 17 ST50 CPU Core Volume 1, Architecture.
 */
/* The bits that are required in the SH-5 TLB are placed in the h/w-defined
   positions, to avoid expensive bit shuffling on every refill.  The remaining
   bits are used for s/w purposes and masked out on each refill.

   Note, the PTE slots are used to hold data of type swp_entry_t when a page
   is swapped out.  Only the _PAGE_PRESENT flag is significant when the page
   is swapped out, and it must be placed so that it doesn't overlap either
   the type or offset fields of swp_entry_t.  For x86, offset is at [31:8]
   and type at [6:1], with _PAGE_PRESENT at bit 0 for both pte_t and
   swp_entry_t.  This scheme doesn't map to SH-5 because bit [0] controls
   cacheability.  So bit [2] is used for _PAGE_PRESENT and the type field of
   swp_entry_t is split into 2 pieces.  That is handled by __swp_entry() and
   __swp_type() below. */
#define _PAGE_WT	0x001  /* CB0: if cacheable, 1->write-thru, 0->write-back */
#define _PAGE_DEVICE	0x001  /* CB0: if uncacheable, 1->device (i.e. no write-combining or reordering at bus level) */
#define _PAGE_CACHABLE	0x002  /* CB1: uncacheable/cacheable */
#define _PAGE_PRESENT	0x004  /* software: page is present */
#define _PAGE_SIZE0	0x008  /* SZ0-bit : size of page */
#define _PAGE_SIZE1	0x010  /* SZ1-bit : size of page */
#define _PAGE_SHARED	0x020  /* software: reflects PTEH's SH */
#define _PAGE_READ	0x040  /* PR0-bit : read access allowed */
#define _PAGE_EXECUTE	0x080  /* PR1-bit : execute access allowed */
#define _PAGE_WRITE	0x100  /* PR2-bit : write access allowed */
#define _PAGE_USER	0x200  /* PR3-bit : user space access allowed */
#define _PAGE_DIRTY	0x400  /* software: page has been written to */
#define _PAGE_ACCESSED	0x800  /* software: page referenced */
/* Wrapper for extended mode pgprot twiddling */
#define _PAGE_EXT(x)	((unsigned long long)(x) << 32)

/*
 * We can use the sign-extended bits in the PTEL to get 32 bits of
 * software flags. This works for now because no implementation uses
 * anything above the PPN field.
 */
#define _PAGE_WIRED	_PAGE_EXT(0x001) /* software: wire the tlb entry */
#define _PAGE_SPECIAL	_PAGE_EXT(0x002)
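
/* For instance, _PAGE_WIRED == _PAGE_EXT(0x001) == 1ULL << 32, sitting
   just above the 32-bit hardware PTEL, so it is masked out on refill
   along with the other software flags. */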
#define _PAGE_CLEAR_FLAGS	(_PAGE_PRESENT | _PAGE_SHARED | \
				 _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_WIRED)

/* Mask which drops software flags */
#define _PAGE_FLAGS_HARDWARE_MASK	(NEFF_MASK & ~(_PAGE_CLEAR_FLAGS))
/*
 * HugeTLB support
 */
#if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
#define _PAGE_SZHUGE	(_PAGE_SIZE0)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB)
#define _PAGE_SZHUGE	(_PAGE_SIZE1)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512MB)
#define _PAGE_SZHUGE	(_PAGE_SIZE0 | _PAGE_SIZE1)
#endif

/*
 * Stub out _PAGE_SZHUGE if we don't have a good definition for it,
 * to make pte_mkhuge() happy.
 */
#ifndef _PAGE_SZHUGE
# define _PAGE_SZHUGE	(0)
#endif
/*
 * Default flags for a Kernel page.
 * This is fundamentally also SHARED because the main use of this define
 * (other than for PGD/PMD entries) is for the VMALLOC pool which is
 * contextless.
 *
 * _PAGE_EXECUTE is required for modules.
 */
#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
			 _PAGE_EXECUTE | \
			 _PAGE_CACHABLE | _PAGE_ACCESSED | _PAGE_DIRTY | \
			 _PAGE_SHARED)

/* Default flags for a User page */
#define _PAGE_TABLE	(_KERNPG_TABLE | _PAGE_USER)

#define _PAGE_CHG_MASK	(PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | \
			 _PAGE_SPECIAL)
/*
 * We have full permissions (Read/Write/Execute/Shared).
 */
#define _PAGE_COMMON	(_PAGE_PRESENT | _PAGE_USER | \
			 _PAGE_CACHABLE | _PAGE_ACCESSED)

#define PAGE_NONE	__pgprot(_PAGE_CACHABLE | _PAGE_ACCESSED)
#define PAGE_SHARED	__pgprot(_PAGE_COMMON | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_SHARED)
#define PAGE_EXECREAD	__pgprot(_PAGE_COMMON | _PAGE_READ | _PAGE_EXECUTE)

/*
 * We need to include _PAGE_EXECUTE in PAGE_COPY because it is the default
 * protection mode for the stack.
 */
#define PAGE_COPY	PAGE_EXECREAD

#define PAGE_READONLY	__pgprot(_PAGE_COMMON | _PAGE_READ)
#define PAGE_WRITEONLY	__pgprot(_PAGE_COMMON | _PAGE_WRITE)
#define PAGE_RWX	__pgprot(_PAGE_COMMON | _PAGE_READ | \
				 _PAGE_WRITE | _PAGE_EXECUTE)
#define PAGE_KERNEL	__pgprot(_KERNPG_TABLE)

#define PAGE_KERNEL_NOCACHE \
			__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_EXECUTE | _PAGE_ACCESSED | \
				 _PAGE_DIRTY | _PAGE_SHARED)
/* Make it a device mapping for maximum safety (e.g. for mapping device
   registers into user-space via /dev/map). */
#define pgprot_noncached(x)	  __pgprot(((x).pgprot & ~(_PAGE_CACHABLE)) | _PAGE_DEVICE)
#define pgprot_writecombine(prot) __pgprot(pgprot_val(prot) & ~_PAGE_CACHABLE)
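
/*
 * Typical use (editorial sketch): a driver mapping device registers
 * into user space strips the cacheable bit from the VMA's protection
 * before handing it to the generic remap_pfn_range() helper.
 */
#if 0	/* illustration only, not compiled */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	err = remap_pfn_range(vma, vma->vm_start, pfn,
			      vma->vm_end - vma->vm_start,
			      vma->vm_page_prot);
#endif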
/*
 * PTE level access routines.
 *
 * Note1:
 * It's the tree walk leaf. This is the physical address to be stored.
 *
 * Note 2:
 * Regarding the choice of _PTE_EMPTY:

   We must choose a bit pattern that cannot be valid, whether or not the page
   is present.  bit[2]==1 => present, bit[2]==0 => swapped out.  If swapped
   out, bits [31:8], [6:3], [1:0] are under swapper control, so only bit[7]
   is left for us to select.  If we force bit[7]==0 when swapped out, we
   could use the combination bit[7,2]=2'b10 to indicate an empty PTE.
   Alternatively, if we force bit[7]==1 when swapped out, we can use all
   zeroes to indicate empty.  This is convenient, because the page tables get
   cleared to zero when they are allocated.
 */
#define _PTE_EMPTY		0x0
#define pte_present(x)		(pte_val(x) & _PAGE_PRESENT)
#define pte_clear(mm, addr, xp)	(set_pte_at(mm, addr, xp, __pte(_PTE_EMPTY)))
#define pte_none(x)		(pte_val(x) == _PTE_EMPTY)
/*
 * Some definitions to translate between mem_map, PTEs, and page
 * addresses:
 */

/*
 * Given a PTE, return the index of the mem_map[] entry corresponding
 * to the page frame the PTE refers to: take the absolute physical
 * address, make it relative to __MEMORY_START and shift it down to an
 * index.
 */
#define pte_pagenr(x)		(((unsigned long) (pte_val(x)) - \
				 __MEMORY_START) >> PAGE_SHIFT)

/*
 * Given a PTE, return the "struct page *".
 */
#define pte_page(x)		(mem_map + pte_pagenr(x))

/*
 * Return the number of (down-rounded) MB corresponding to x pages.
 */
#define pages_to_mb(x)		((x) >> (20 - PAGE_SHIFT))
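
/* E.g. with 4 KiB pages (PAGE_SHIFT == 12) this is x >> 8: 256 pages
   per megabyte. */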
/*
 * The following have defined behavior only if pte_present() is true.
 */
static inline int pte_dirty(pte_t pte)	 { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)	 { return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_write(pte_t pte)	 { return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; }

static inline pte_t pte_wrprotect(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_WRITE)); return pte; }
static inline pte_t pte_mkclean(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; }
static inline pte_t pte_mkold(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_ACCESSED)); return pte; }
static inline pte_t pte_mkwrite(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_WRITE)); return pte; }
static inline pte_t pte_mkdirty(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; }
static inline pte_t pte_mkyoung(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; }
static inline pte_t pte_mkhuge(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_SZHUGE)); return pte; }
static inline pte_t pte_mkspecial(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_SPECIAL)); return pte; }
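
/*
 * These helpers modify a local copy and return it; the caller must
 * write the result back. A minimal sketch (editorial addition):
 */
#if 0	/* illustration only, not compiled */
	pte_t entry = *ptep;
	entry = pte_mkdirty(pte_mkyoung(entry));
	set_pte_at(vma->vm_mm, address, ptep, entry);
#endif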
/*
 * Conversion functions: convert a page and protection to a page entry.
 *
 * extern pte_t mk_pte(struct page *page, pgprot_t pgprot)
 */
#define mk_pte(page, pgprot)						\
({									\
	pte_t __pte;							\
									\
	set_pte(&__pte, __pte((((page) - mem_map) << PAGE_SHIFT) |	\
		__MEMORY_START | pgprot_val((pgprot))));		\
	__pte;								\
})

/*
 * This takes an (absolute) physical page address that is used
 * by the remapping functions.
 */
#define mk_pte_phys(physpage, pgprot) \
({ pte_t __pte; set_pte(&__pte, __pte((physpage) | pgprot_val(pgprot))); __pte; })
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	set_pte(&pte, __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot)));
	return pte;
}
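
/*
 * A sketch of changing protections with pte_modify() (editorial
 * addition): _PAGE_CHG_MASK keeps the pfn plus the accessed, dirty and
 * special bits, and the new pgprot supplies everything else.
 */
#if 0	/* illustration only, not compiled */
	pte_t entry = *ptep;
	entry = pte_modify(entry, PAGE_READONLY);  /* drop write/execute */
	set_pte_at(mm, address, ptep, entry);
#endif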
/* Encode and decode a swap entry */
#define __swp_type(x)			(((x).val & 3) + (((x).val >> 1) & 0x3c))
#define __swp_offset(x)			((x).val >> 8)
#define __swp_entry(type, offset)	((swp_entry_t) { (((offset) << 8) + \
					 (((type) & 0x3c) << 1) + ((type) & 3)) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
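
/*
 * Round-trip sketch (editorial addition): type bits [1:0] land in
 * entry bits [1:0] and type bits [5:2] in entry bits [6:3], leaving
 * bit 2 (_PAGE_PRESENT) and bit 7 clear, as described above.
 */
#if 0	/* illustration only, not compiled */
	swp_entry_t e = __swp_entry(0x17, 0x1234);	/* e.val == 0x12342b */
	unsigned long type   = __swp_type(e);		/* == 0x17 */
	unsigned long offset = __swp_offset(e);		/* == 0x1234 */
#endif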
#endif /* !__ASSEMBLY__ */

#define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define pfn_pmd(pfn, prot)	__pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))

#endif /* __ASM_SH_PGTABLE_64_H */