#ifndef _I386_PGTABLE_H
#define _I386_PGTABLE_H
/*
 * The Linux memory management assumes a three-level page table setup. On
 * the i386, we use that, but "fold" the mid level into the top-level page
 * table, so that we physically have the same two-level page table as the
 * i386 mmu expects.
 *
 * This file contains the functions and defines necessary to modify and use
 * the i386 page table tree.
 */
#ifndef __ASSEMBLY__
#include <asm/processor.h>
#include <asm/fixmap.h>
#include <linux/threads.h>
#include <asm/paravirt.h>

#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct vm_area_struct;

extern pgd_t swapper_pg_dir[1024];
extern struct kmem_cache *pmd_cache;
extern spinlock_t pgd_lock;
extern struct page *pgd_list;
void check_pgt_cache(void);

void pmd_ctor(struct kmem_cache *, void *);
void pgtable_cache_init(void);
void paging_init(void);

/*
 * The Linux x86 paging architecture is 'compile-time dual-mode': it
 * implements both the traditional 2-level x86 page tables and the
 * newer 3-level PAE-mode page tables.
 */
#ifdef CONFIG_X86_PAE
# include <asm/pgtable-3level-defs.h>
# define PMD_SIZE	(1UL << PMD_SHIFT)
# define PMD_MASK	(~(PMD_SIZE-1))
#else
# include <asm/pgtable-2level-defs.h>
#endif

#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)

#define TWOLEVEL_PGDIR_SHIFT	22
#define BOOT_USER_PGD_PTRS (__PAGE_OFFSET >> TWOLEVEL_PGDIR_SHIFT)
#define BOOT_KERNEL_PGD_PTRS (1024-BOOT_USER_PGD_PTRS)
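
/*
 * Worked example (illustrative only, assuming the common PAGE_OFFSET of
 * 0xC0000000): without PAE, PGDIR_SHIFT is 22, so PGDIR_SIZE is 4MB and
 * USER_PGD_PTRS is 0xC0000000 >> 22 = 768, leaving KERNEL_PGD_PTRS =
 * 1024 - 768 = 256 pgd entries for the kernel mapping.  The BOOT_* variants
 * always use the 2-level shift of 22, independent of CONFIG_X86_PAE.
 */
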
/* Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts. That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
#define VMALLOC_OFFSET	(8*1024*1024)
#define VMALLOC_START	(((unsigned long) high_memory + \
			2*VMALLOC_OFFSET-1) & ~(VMALLOC_OFFSET-1))
#ifdef CONFIG_HIGHMEM
# define VMALLOC_END	(PKMAP_BASE-2*PAGE_SIZE)
#else
# define VMALLOC_END	(FIXADDR_START-2*PAGE_SIZE)
#endif
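
/*
 * Purely illustrative numbers (not from this header): with PAGE_OFFSET at
 * 0xC0000000 and 128MB of lowmem, high_memory is 0xC8000000, so
 * VMALLOC_START = (0xC8000000 + 2*8MB - 1) & ~(8MB - 1) = 0xC8800000,
 * i.e. the vmalloc area begins 8MB above the end of lowmem, aligned to
 * VMALLOC_OFFSET.
 */
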
/*
 * Define this if things work differently on an i386 and an i486:
 * it will (on an i486) warn about kernel memory accesses that are
 * done without an 'access_ok(VERIFY_WRITE,..)'
 */
#undef TEST_ACCESS_OK

/* The boot page tables (all created as a single array) */
extern unsigned long pg0[];

#define pte_present(x)	((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))

/* To avoid harmful races, pmd_none(x) should check only the lower half
 * of the entry when PAE is enabled */
#define pmd_none(x)	(!(unsigned long)pmd_val(x))
#define pmd_present(x)	(pmd_val(x) & _PAGE_PRESENT)
#define pmd_bad(x)	((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)

#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
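
/*
 * Example (illustrative): with the usual 4kB pages PAGE_SHIFT is 12, so
 * pages_to_mb(x) is x >> 8, e.g. pages_to_mb(256) == 1 (256 * 4kB = 1MB).
 */
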
#ifdef CONFIG_X86_PAE
# include <asm/pgtable-3level.h>
#else
# include <asm/pgtable-2level.h>
#endif

/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - pointer to the source pgd range
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
}
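
/*
 * Illustrative use (a sketch, not a quote of any caller): copying the
 * kernel part of the reference page directory into a freshly allocated
 * pgd page:
 *
 *	clone_pgd_range(new_pgd + USER_PGD_PTRS,
 *			swapper_pg_dir + USER_PGD_PTRS,
 *			KERNEL_PGD_PTRS);
 *
 * 'new_pgd' is a hypothetical pointer to the new pgd page.
 */
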
/*
 * Macro to mark a page protection value as "uncacheable".  On processors
 * which do not support it, this is a no-op.
 */
#define pgprot_noncached(prot)	((boot_cpu_data.x86 > 3) \
	? (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT)) : (prot))
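
/*
 * Illustrative use (hypothetical driver code): requesting an uncached
 * user mapping of device memory from an mmap handler:
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	io_remap_pfn_range(vma, vma->vm_start, pfn, size, vma->vm_page_prot);
 */
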
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pgd_index_k(addr) pgd_index(addr)
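
/*
 * Worked example (illustrative, assuming PAGE_OFFSET of 0xC0000000):
 * without PAE, pgd_index(0xC0000000) = (0xC0000000 >> 22) & 1023 = 768,
 * the first kernel pgd slot; with PAE, (0xC0000000 >> 30) & 3 = 3.
 */
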
/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's
 */
#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))

/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/*
 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this macro returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
#define pmd_index(address) \
		(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this macro returns the index of the entry in the pte page which would
 * control the given virtual address
 */
#define pte_index(address) \
		(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, address) \
	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))

#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))

#define pmd_page_vaddr(pmd) \
		((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
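
/*
 * Sketch (not part of this header) of how the index/offset macros combine
 * to reach a kernel pte for a virtual address 'addr'; pud_offset() and
 * pmd_offset() come from the generic/per-mode page table headers:
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pmd_t *pmd = pmd_offset(pud_offset(pgd, addr), addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 *
 * On non-PAE i386 the middle levels are folded, so the pud/pmd steps just
 * re-interpret the pgd entry; with PAE the pmd is a real second level.
 */
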
/*
 * Helper function that returns the kernel pagetable entry controlling
 * the virtual address 'address'. NULL means no pagetable entry present.
 * NOTE: the return type is pte_t but if the pmd is PSE then we return it
 * as a pte too.
 */
extern pte_t *lookup_address(unsigned long address, int *level);
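
/*
 * Illustrative use (hypothetical, error handling omitted):
 *
 *	int level;
 *	pte_t *pte = lookup_address(address, &level);
 *	if (pte && pte_present(*pte))
 *		... the mapping exists; 'level' reports which level it was ...
 */
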
#if defined(CONFIG_HIGHPTE)
#define pte_offset_map(dir, address) \
	((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE0) + pte_index(address))
#define pte_offset_map_nested(dir, address) \
	((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE1) + pte_index(address))
#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
#else
#define pte_offset_map(dir, address) \
	((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
#endif
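
/*
 * Illustrative map/use/unmap pattern (hypothetical caller): with
 * CONFIG_HIGHPTE the pte page may live in highmem, so the temporary
 * mapping must always be dropped again:
 *
 *	pte_t *pte = pte_offset_map(pmd, addr);
 *	pte_t entry = *pte;
 *	pte_unmap(pte);
 */
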
/* Clear a kernel PTE and flush it from the TLB */
#define kpte_clear_flush(ptep, vaddr)		\
do {						\
	pte_clear(&init_mm, vaddr, ptep);	\
	__flush_tlb_one(vaddr);			\
} while (0)

/*
 * The i386 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma,address,pte) do { } while (0)

void native_pagetable_setup_start(pgd_t *base);
void native_pagetable_setup_done(pgd_t *base);

#ifndef CONFIG_PARAVIRT
static inline void paravirt_pagetable_setup_start(pgd_t *base)
{
	native_pagetable_setup_start(base);
}

static inline void paravirt_pagetable_setup_done(pgd_t *base)
{
	native_pagetable_setup_done(base);
}
#endif	/* !CONFIG_PARAVIRT */

#endif /* !__ASSEMBLY__ */

/*
 * kern_addr_valid() is (1) for FLATMEM and (0) for
 * SPARSEMEM and DISCONTIGMEM
 */
#ifdef CONFIG_FLATMEM
#define kern_addr_valid(addr)	(1)
#else
#define kern_addr_valid(kaddr)	(0)
#endif

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
		remap_pfn_range(vma, vaddr, pfn, size, prot)

#endif /* _I386_PGTABLE_H */