#ifndef _ASM_X86_PGTABLE_32_H
#define _ASM_X86_PGTABLE_32_H

#include <asm/pgtable_32_types.h>

/*
 * The Linux memory management assumes a three-level page table setup. On
 * the i386, we use that, but "fold" the mid level into the top-level page
 * table, so that we physically have the same two-level page table as the
 * i386 mmu expects.
 *
 * This file contains the functions and defines necessary to modify and use
 * the i386 page table tree.
 */
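/*
 * Illustrative sketch (not part of the original header), assuming the
 * generic folding helpers: with the mid level folded, the "three-level"
 * walk Linux performs degenerates to the two levels the i386 MMU really
 * has.  pmd_offset() simply reinterprets the entry it is handed, so the
 * pgd and pmd below name the same hardware slot:
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);	// hardware level 1
 *	pmd_t *pmd = pmd_offset(pud_offset(pgd, addr), addr); // same slot
 *	pte_t *pte = pte_offset_map(pmd, addr);	// hardware level 2
 */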
#ifndef __ASSEMBLY__
#include <asm/processor.h>
#include <asm/fixmap.h>
#include <linux/threads.h>
#include <asm/paravirt.h>

#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct mm_struct;
struct vm_area_struct;

extern pgd_t swapper_pg_dir[1024];
extern pgd_t initial_page_table[1024];
extern pmd_t initial_pg_pmd[];

static inline void pgtable_cache_init(void) { }
static inline void check_pgt_cache(void) { }
void paging_init(void);

/*
 * Define this if things work differently on an i386 and an i486:
 * it will (on an i486) warn about kernel memory accesses that are
 * done without an 'access_ok(VERIFY_WRITE,..)'
 */
#undef TEST_ACCESS_OK
#ifdef CONFIG_X86_PAE
# include <asm/pgtable-3level.h>
#else
# include <asm/pgtable-2level.h>
#endif

#if defined(CONFIG_HIGHPTE)
#define pte_offset_map(dir, address)				\
	((pte_t *)kmap_atomic(pmd_page(*(dir))) +		\
	 pte_index((address)))
#define pte_unmap(pte) kunmap_atomic((pte))
#else
#define pte_offset_map(dir, address)				\
	((pte_t *)page_address(pmd_page(*(dir))) + pte_index((address)))
#define pte_unmap(pte) do { } while (0)
#endif
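/*
 * Usage sketch (illustrative, not in the original header): the
 * pte_offset_map()/pte_unmap() pair must bracket any access to a PTE,
 * because with CONFIG_HIGHPTE the page table page may live in highmem
 * and is only mapped temporarily via kmap_atomic().  The helper name is
 * hypothetical, the walk skips error checking, and it assumes the
 * classic four-level helpers without the p4d level newer kernels add:
 */
static inline pte_t example_read_kernel_pte(unsigned long vaddr)
{
	pgd_t *pgd = pgd_offset_k(vaddr);	/* top level */
	pud_t *pud = pud_offset(pgd, vaddr);	/* folded on i386 */
	pmd_t *pmd = pmd_offset(pud, vaddr);	/* folded without PAE */
	pte_t *ptep, pte;

	ptep = pte_offset_map(pmd, vaddr);	/* kmap_atomic() if HIGHPTE */
	pte = *ptep;				/* the actual PTE access */
	pte_unmap(ptep);			/* kunmap_atomic() if HIGHPTE */
	return pte;
}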
/* Clear a kernel PTE and flush it from the TLB */
#define kpte_clear_flush(ptep, vaddr)		\
do {						\
	pte_clear(&init_mm, (vaddr), (ptep));	\
	__flush_tlb_one((vaddr));		\
} while (0)
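/*
 * Usage sketch (hypothetical, not in the original header): this is the
 * pattern the 32-bit highmem code follows when it tears down a
 * kmap_atomic() slot -- clear the kernel PTE, then shoot down the one
 * stale TLB entry for that address.  'kmap_ptep' and 'vaddr' are
 * stand-in names:
 */
static inline void example_clear_kmap_slot(pte_t *kmap_ptep,
					   unsigned long vaddr)
{
	kpte_clear_flush(kmap_ptep, vaddr);	/* PTE and TLB entry gone */
}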
#endif /* !__ASSEMBLY__ */

/*
 * kern_addr_valid() is (1) for FLATMEM and (0) for
 * SPARSEMEM and DISCONTIGMEM
 */
#ifdef CONFIG_FLATMEM
#define kern_addr_valid(addr)	(1)
#else
#define kern_addr_valid(kaddr)	(0)
#endif
/*
 * This is how much memory in addition to the memory covered up to
 * and including _end we need mapped initially.
 * We need:
 *  (KERNEL_IMAGE_SIZE/4096) / 1024 pages (worst case, non PAE)
 *  (KERNEL_IMAGE_SIZE/4096) / 512 + 4 pages (worst case for PAE)
 *
 * Modulo rounding, each megabyte assigned here requires a kilobyte of
 * memory, which is currently unreclaimed.
 *
 * This should be a multiple of a page.
 *
 * KERNEL_IMAGE_SIZE should be greater than pa(_end) and smaller than
 * max_low_pfn, otherwise page table entries will be wasted.
 */
#if PTRS_PER_PMD > 1
#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
#else
#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
#endif
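/*
 * Worked example (illustrative, assuming KERNEL_IMAGE_SIZE == 512 MiB,
 * i.e. 131072 4 KiB pages to cover):
 *
 *   non-PAE (PTRS_PER_PMD == 1, PTRS_PER_PGD == 1024):
 *	PAGE_TABLE_SIZE(131072) == 131072 / 1024       == 128 pages
 *   PAE     (PTRS_PER_PMD == 512, PTRS_PER_PGD == 4):
 *	PAGE_TABLE_SIZE(131072) == 131072 / 512 + 4    == 260 pages
 */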
/*
 * Number of possible pages in the lowmem region.
 *
 * We shift 2 by 31 instead of 1 by 32 in order to avoid a gas warning
 * about an overflowing shift count when gas has been compiled with
 * support for only a host target that uses a 32-bit type for its
 * internal representation.
 */
#define LOWMEM_PAGES ((((2<<31) - __PAGE_OFFSET) >> PAGE_SHIFT))
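/*
 * Worked example (illustrative): with the default __PAGE_OFFSET of
 * 0xC0000000 and PAGE_SHIFT == 12,
 *
 *	LOWMEM_PAGES == (0x100000000 - 0xC0000000) >> 12
 *		     == 0x40000000 >> 12
 *		     == 262144 pages, i.e. 1 GiB of lowmem.
 */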
#endif /* _ASM_X86_PGTABLE_32_H */