#ifndef _ASM_X86_PGTABLE_32_H
#define _ASM_X86_PGTABLE_32_H

#include <asm/pgtable_32_types.h>

/*
 * The Linux memory management assumes a three-level page table setup. On
 * the i386, we use that, but "fold" the mid level into the top-level page
 * table, so that we physically have the same two-level page table as the
 * i386 mmu expects.
 *
 * This file contains the functions and defines necessary to modify and use
 * the i386 page table tree.
 */
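/*
 * For reference (these values come from pgtable-2level_types.h and
 * pgtable-3level_types.h, not from this file): without PAE a virtual
 * address splits into a 10-bit page directory index (1024 entries),
 * a 10-bit page table index (1024 entries) and a 12-bit page offset;
 * with PAE the split is 2/9/9/12 across pgd, pmd, pte and offset.
 */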
#ifndef __ASSEMBLY__
#include <asm/processor.h>
#include <asm/fixmap.h>
#include <linux/threads.h>
#include <asm/paravirt.h>

#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct mm_struct;
struct vm_area_struct;
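/*
 * swapper_pg_dir is the kernel's reference page directory (owned by
 * init_mm); the kernel portion of each process page directory is
 * initialized from it.  trampoline_pg_dir is a separate directory set
 * up for the boot trampoline.
 */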
extern pgd_t swapper_pg_dir[1024];
extern pgd_t trampoline_pg_dir[1024];

static inline void pgtable_cache_init(void) { }
static inline void check_pgt_cache(void) { }
void paging_init(void);
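/*
 * set_pmd_pfn(vaddr, pfn, flags): associate a large (pmd-sized) virtual
 * page frame with a physical page frame and protection flags.
 */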
extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);

/*
 * Define this if things work differently on an i386 and an i486:
 * it will (on an i486) warn about kernel memory accesses that are
 * done without a 'access_ok(VERIFY_WRITE,..)'
 */
#undef TEST_ACCESS_OK

#ifdef CONFIG_X86_PAE
# include <asm/pgtable-3level.h>
#else
# include <asm/pgtable-2level.h>
#endif
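/*
 * With CONFIG_HIGHPTE, user page-table pages may be allocated from
 * highmem and so have no permanent kernel mapping; they must be mapped
 * with kmap_atomic_pte() before their PTEs can be dereferenced.
 * __KM_PTE picks the kmap slot by context (NMI, IRQ or normal) so that
 * nested uses do not clobber each other's mappings.
 */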
#if defined(CONFIG_HIGHPTE)
#define __KM_PTE			\
	(in_nmi() ? KM_NMI_PTE : 	\
	 in_irq() ? KM_IRQ_PTE :	\
	 KM_PTE0)
#define pte_offset_map(dir, address)					\
	((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), __KM_PTE) +		\
	 pte_index((address)))
#define pte_offset_map_nested(dir, address)				\
	((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), KM_PTE1) +		\
	 pte_index((address)))
#define pte_unmap(pte) kunmap_atomic((pte), __KM_PTE)
#define pte_unmap_nested(pte) kunmap_atomic((pte), KM_PTE1)
#else
#define pte_offset_map(dir, address)					\
	((pte_t *)page_address(pmd_page(*(dir))) + pte_index((address)))
#define pte_offset_map_nested(dir, address) pte_offset_map((dir), (address))
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
#endif
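/*
 * Illustrative usage only (not part of this header): a caller maps a
 * page table, works on its PTEs, then unmaps it.  When two tables are
 * mapped at once (as in copy_pte_range-style code) the second one uses
 * the _nested variants and is unmapped first:
 *
 *	pte_t *dst_pte = pte_offset_map(dst_pmd, addr);
 *	pte_t *src_pte = pte_offset_map_nested(src_pmd, addr);
 *	... copy or inspect entries ...
 *	pte_unmap_nested(src_pte);
 *	pte_unmap(dst_pte);
 */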
/* Clear a kernel PTE and flush it from the TLB */
#define kpte_clear_flush(ptep, vaddr)		\
do {						\
	pte_clear(&init_mm, (vaddr), (ptep));	\
	__flush_tlb_one((vaddr));		\
} while (0)
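/*
 * Typical (illustrative) use: tearing down a temporary kernel mapping
 * such as a kmap_atomic fixmap slot, clearing its PTE and flushing just
 * that one TLB entry (kmap_pte and idx here belong to the highmem kmap
 * code, not to this header):
 *
 *	kpte_clear_flush(kmap_pte - idx, vaddr);
 */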
/*
 * The i386 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, pte) do { } while (0)

#endif /* !__ASSEMBLY__ */

/*
 * kern_addr_valid() is (1) for FLATMEM and (0) for
 * SPARSEMEM and DISCONTIGMEM
 */
#ifdef CONFIG_FLATMEM
#define kern_addr_valid(addr)	(1)
#else
#define kern_addr_valid(kaddr)	(0)
#endif

#endif /* _ASM_X86_PGTABLE_32_H */