/*
 * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
 * Derived from include/asm-i386/pgtable.h
 * Licensed under the GPL
 */

#ifndef __UM_PGTABLE_H
#define __UM_PGTABLE_H

#include "linux/sched.h"
#include "asm/processor.h"
#include "asm/page.h"
#include "asm/fixmap.h"

extern void *um_virt_to_phys(struct task_struct *task, unsigned long virt,
			     pte_t *pte_out);

/* zero page used for uninitialized stuff */
extern unsigned long *empty_zero_page;

#define pgtable_cache_init() do ; while (0)

/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define PMD_SHIFT	22
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT	22
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
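
/*
 * Worked example (illustrative, not part of the original header): with
 * PMD_SHIFT == PGDIR_SHIFT == 22, each top-level entry maps
 * 1UL << 22 == 4MB of virtual space, and PGDIR_MASK == 0xffc00000 rounds
 * an address down to the start of that 4MB region, e.g.
 * 0x00401234 & PGDIR_MASK == 0x00400000.
 */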
/*
 * entries per page directory level: the i386 is two-level, so
 * we don't really have any PMD directory physically.
 */
#define PTRS_PER_PTE	1024
#define PTRS_PER_PMD	1
#define PTRS_PER_PGD	1024
#define USER_PTRS_PER_PGD	(TASK_SIZE/PGDIR_SIZE)
#define FIRST_USER_PGD_NR	0

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/*
 * pgd entries used up by user/kernel:
 */
#define USER_PGD_PTRS (TASK_SIZE >> PGDIR_SHIFT)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
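
/*
 * Worked example (illustrative, assuming a 3GB TASK_SIZE of 0xc0000000):
 * USER_PGD_PTRS == 0xc0000000 >> 22 == 768, leaving
 * KERNEL_PGD_PTRS == 1024 - 768 == 256 top-level entries for the kernel.
 */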
#ifndef __ASSEMBLY__

/* Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts. That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
extern unsigned long end_iomem;

#define VMALLOC_OFFSET	(__va_space)
#define VMALLOC_START	((end_iomem + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))

#ifdef CONFIG_HIGHMEM
# define VMALLOC_END	(PKMAP_BASE-2*PAGE_SIZE)
#else
# define VMALLOC_END	(FIXADDR_START-2*PAGE_SIZE)
#endif
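
/*
 * Worked example (illustrative, assuming an 8MB __va_space and
 * end_iomem == 0x20800000): VMALLOC_START becomes
 * (0x20800000 + 0x800000) & ~0x7fffff == 0x21000000, i.e. the vmalloc
 * area starts at the next 8MB boundary past the end of the iomem region.
 */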
#define _PAGE_PRESENT	0x001
#define _PAGE_NEWPAGE	0x002
#define _PAGE_NEWPROT	0x004
#define _PAGE_FILE	0x008	/* set:pagecache unset:swap */
#define _PAGE_PROTNONE	0x010	/* If not present */
#define _PAGE_RW	0x020
#define _PAGE_USER	0x040
#define _PAGE_ACCESSED	0x080
#define _PAGE_DIRTY	0x100

#define REGION_MASK	0xf0000000
#define REGION_SHIFT	28

#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED)
/*
 * The i386 can't do page protection for execute, and considers execute
 * permission the same as read.  Also, write permissions imply read
 * permissions.  This is the closest we can get..
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED
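
/*
 * Reading the tables above (illustrative note, not from the original
 * header): the three digits are a vma's exec/write/read bits, so e.g.
 * __P010 is a private write-only mapping (copy-on-write) and __S011 is a
 * shared read/write mapping.
 */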
/*
 * Define this if things work differently on an i386 and an i486:
 * it will (on an i486) warn about kernel memory accesses that are
 * done without a 'verify_area(VERIFY_WRITE,..)'
 */
#undef TEST_VERIFY_AREA

/* page table for 0-4MB for everybody */
extern unsigned long pg0[1024];

/*
 * BAD_PAGETABLE is used when we need a bogus page-table, while
 * BAD_PAGE is used for a bogus page.
 *
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern pte_t __bad_page(void);
extern pte_t * __bad_pagetable(void);

#define BAD_PAGETABLE __bad_pagetable()
#define BAD_PAGE __bad_page()

#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)

/* number of bits that fit into a memory pointer */
#define BITS_PER_PTR	(8*sizeof(unsigned long))

/* to align the pointer to a pointer address */
#define PTR_MASK	(~(sizeof(void*)-1))

/* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */
/* 64-bit machines, beware! SRB. */
#define SIZEOF_PTR_LOG2	2

/* to find an entry in a page-table */
#define PAGE_PTR(address) \
	((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)
#define pte_none(x)	!(pte_val(x) & ~_PAGE_NEWPAGE)
#define pte_present(x)	(pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE))

#define pte_clear(xp)	do { pte_val(*(xp)) = _PAGE_NEWPAGE; } while (0)

#define pmd_none(x)	(!(pmd_val(x) & ~_PAGE_NEWPAGE))
#define pmd_bad(x)	((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
#define pmd_present(x)	(pmd_val(x) & _PAGE_PRESENT)
#define pmd_clear(xp)	do { pmd_val(*(xp)) = _PAGE_NEWPAGE; } while (0)

#define pmd_newpage(x)	(pmd_val(x) & _PAGE_NEWPAGE)
#define pmd_mkuptodate(x) (pmd_val(x) &= ~_PAGE_NEWPAGE)

/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 */
static inline int pgd_none(pgd_t pgd)		{ return 0; }
static inline int pgd_bad(pgd_t pgd)		{ return 0; }
static inline int pgd_present(pgd_t pgd)	{ return 1; }
static inline void pgd_clear(pgd_t * pgdp)	{ }

#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))

#define pte_page(pte) phys_to_page(pte_val(pte))
#define pmd_page(pmd) phys_to_page(pmd_val(pmd) & PAGE_MASK)

#define pte_pfn(x) phys_to_pfn(pte_val(x))
#define pfn_pte(pfn, prot) __pte(pfn_to_phys(pfn) | pgprot_val(prot))

extern struct page *phys_to_page(const unsigned long phys);
extern struct page *__virt_to_page(const unsigned long virt);
#define virt_to_page(addr) __virt_to_page((const unsigned long) addr)

/*
 * Bits 0 through 3 are taken
 */
#define PTE_FILE_MAX_BITS	28

#define pte_to_pgoff(pte) ((pte).pte_low >> 4)

#define pgoff_to_pte(off) \
	((pte_t) { ((off) << 4) + _PAGE_FILE })
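
/*
 * Worked example (illustrative, not part of the original header): a
 * nonlinear file pte for page offset 5 is encoded as
 * (5 << 4) + _PAGE_FILE == 0x58, and pte_to_pgoff() recovers 0x58 >> 4 == 5.
 * The low four bits are reserved for the flag bits noted above, which is
 * why PTE_FILE_MAX_BITS is 28 on a 32-bit pte.
 */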
static inline pte_t pte_mknewprot(pte_t pte)
{
	pte_val(pte) |= _PAGE_NEWPROT;
	return(pte);
}

static inline pte_t pte_mknewpage(pte_t pte)
{
	pte_val(pte) |= _PAGE_NEWPAGE;
	return(pte);
}

static inline void set_pte(pte_t *pteptr, pte_t pteval)
{
	/* If it's a swap entry, it needs to be marked _PAGE_NEWPAGE so
	 * fix_range knows to unmap it.  _PAGE_NEWPROT is specific to
	 * mapped pages.
	 */
	*pteptr = pte_mknewpage(pteval);
	if(pte_present(*pteptr)) *pteptr = pte_mknewprot(*pteptr);
}
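
/*
 * Illustrative usage (not part of the original header): installing a
 * present pte, e.g.
 *
 *	set_pte(ptep, mk_pte(page, PAGE_READONLY));
 *
 * leaves it flagged _PAGE_NEWPAGE and _PAGE_NEWPROT, so a later
 * fix_range() pass knows this mapping still has to be pushed out to the
 * host address space.
 */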
/*
 * (pmds are folded into pgds so this doesn't get actually called,
 * but the define is needed for a generic inline function.)
 */
#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)
#define set_pgd(pgdptr, pgdval) (*(pgdptr) = pgdval)

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_user(pte_t pte)
{
	return((pte_val(pte) & _PAGE_USER) &&
	       !(pte_val(pte) & _PAGE_PROTNONE));
}

static inline int pte_read(pte_t pte)
{
	return((pte_val(pte) & _PAGE_USER) &&
	       !(pte_val(pte) & _PAGE_PROTNONE));
}

static inline int pte_exec(pte_t pte){
	return((pte_val(pte) & _PAGE_USER) &&
	       !(pte_val(pte) & _PAGE_PROTNONE));
}

static inline int pte_write(pte_t pte)
{
	return((pte_val(pte) & _PAGE_RW) &&
	       !(pte_val(pte) & _PAGE_PROTNONE));
}

/*
 * The following only works if pte_present() is not true.
 */
static inline int pte_file(pte_t pte)
{
	return (pte).pte_low & _PAGE_FILE;
}
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_newpage(pte_t pte) { return pte_val(pte) & _PAGE_NEWPAGE; }
static inline int pte_newprot(pte_t pte)
{
	return(pte_present(pte) && (pte_val(pte) & _PAGE_NEWPROT));
}

static inline pte_t pte_rdprotect(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_USER;
	return(pte_mknewprot(pte));
}

static inline pte_t pte_exprotect(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_USER;
	return(pte_mknewprot(pte));
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_DIRTY;
	return(pte);
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_ACCESSED;
	return(pte);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_RW;
	return(pte_mknewprot(pte));
}

static inline pte_t pte_mkread(pte_t pte)
{
	pte_val(pte) |= _PAGE_USER;
	return(pte_mknewprot(pte));
}

static inline pte_t pte_mkexec(pte_t pte)
{
	pte_val(pte) |= _PAGE_USER;
	return(pte_mknewprot(pte));
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_DIRTY;
	return(pte);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	return(pte);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_RW;
	return(pte_mknewprot(pte));
}

static inline pte_t pte_mkuptodate(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_NEWPAGE;
	if(pte_present(pte)) pte_val(pte) &= ~_PAGE_NEWPROT;
	return(pte);
}
extern unsigned long page_to_phys(struct page *page);

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
extern pte_t mk_pte(struct page *page, pgprot_t pgprot);

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
	if(pte_present(pte)) pte = pte_mknewpage(pte_mknewprot(pte));
	return pte;
}
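
/*
 * Illustrative usage (not part of the original header): downgrading a
 * mapping to read-only while keeping its page frame and its accessed and
 * dirty bits, e.g.
 *
 *	pte = pte_modify(pte, PAGE_READONLY);
 *
 * Since the pte stays present it also comes back flagged for update
 * (_PAGE_NEWPAGE | _PAGE_NEWPROT).
 */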
#define pmd_page_kernel(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))

/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) ((address >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) \
	((mm)->pgd + ((address) >> PGDIR_SHIFT))

/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#define pmd_index(address) \
	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

/* Find an entry in the second-level page table.. */
static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
{
	return (pmd_t *) dir;
}

/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this macro returns the index of the entry in the pte page which would
 * control the given virtual address
 */
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, address) \
	((pte_t *) pmd_page_kernel(*(dir)) + pte_index(address))
#define pte_offset_map(dir, address) \
	((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE0) + pte_index(address))
#define pte_offset_map_nested(dir, address) \
	((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE1) + pte_index(address))
#define pte_unmap(pte) kunmap_atomic((pte), KM_PTE0)
#define pte_unmap_nested(pte) kunmap_atomic((pte), KM_PTE1)
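
/*
 * A minimal lookup sketch (illustrative, not part of the original header):
 * walking from a kernel virtual address down to its pte with the helpers
 * above.  The function name is hypothetical.
 */
#if 0	/* example only, not built */
static inline pte_t *example_lookup_kernel_pte(unsigned long address)
{
	pgd_t *pgd = pgd_offset_k(address);	/* entry in init_mm's pgd */
	pmd_t *pmd = pmd_offset(pgd, address);	/* folded: same slot as the pgd */

	return pte_offset_kernel(pmd, address);	/* entry within the pte page */
}
#endif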
#define update_mmu_cache(vma,address,pte) do ; while (0)

/* Encode and de-code a swap entry */
#define __swp_type(x)		(((x).val >> 4) & 0x3f)
#define __swp_offset(x)		((x).val >> 11)

#define __swp_entry(type, offset) \
	((swp_entry_t) { ((type) << 4) | ((offset) << 11) })
#define __pte_to_swp_entry(pte) \
	((swp_entry_t) { pte_val(pte_mkuptodate(pte)) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
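
/*
 * Worked layout sketch (illustrative, not part of the original header):
 * the low four flag bits are left clear, bits 4-9 carry the swap type and
 * bits 11 and up carry the offset, e.g.
 *
 *	__swp_entry(2, 100).val == (100 << 11) | (2 << 4) == 0x32020
 *	__swp_type(__swp_entry(2, 100))   == 2
 *	__swp_offset(__swp_entry(2, 100)) == 100
 */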
#define kern_addr_valid(addr) (1)

#include <asm-generic/pgtable.h>

#endif /* !__ASSEMBLY__ */

#endif /* __UM_PGTABLE_H */
/*
 * Overrides for Emacs so that we follow Linus's tabbing style.
 * Emacs will notice this stuff at the end of the file and automatically
 * adjust the settings for this buffer only.  This must remain at the end
 * of the file.
 * ---------------------------------------------------------------------------
 * Local variables:
 * c-file-style: "linux"
 * End:
 */