/*
 *  linux/arch/i386/mm/pgtable.c
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

void show_mem(void)
{
	int total = 0, reserved = 0;
	int shared = 0, cached = 0;
	int highmem = 0;
	struct page *page;
	pg_data_t *pgdat;
	unsigned long i;
	unsigned long flags;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas();
	for_each_online_pgdat(pgdat) {
		pgdat_resize_lock(pgdat, &flags);
		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
			if (unlikely(i % MAX_ORDER_NR_PAGES == 0))
				touch_nmi_watchdog();
			page = pgdat_page_nr(pgdat, i);
			total++;
			if (PageHighMem(page))
				highmem++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
		pgdat_resize_unlock(pgdat, &flags);
	}
	printk(KERN_INFO "%d pages of RAM\n", total);
	printk(KERN_INFO "%d pages of HIGHMEM\n", highmem);
	printk(KERN_INFO "%d reserved pages\n", reserved);
	printk(KERN_INFO "%d pages shared\n", shared);
	printk(KERN_INFO "%d pages swap cached\n", cached);

	printk(KERN_INFO "%lu pages dirty\n", global_page_state(NR_FILE_DIRTY));
	printk(KERN_INFO "%lu pages writeback\n",
					global_page_state(NR_WRITEBACK));
	printk(KERN_INFO "%lu pages mapped\n", global_page_state(NR_FILE_MAPPED));
	printk(KERN_INFO "%lu pages slab\n",
		global_page_state(NR_SLAB_RECLAIMABLE) +
		global_page_state(NR_SLAB_UNRECLAIMABLE));
	printk(KERN_INFO "%lu pages pagetables\n",
					global_page_state(NR_PAGETABLE));
}

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		BUG();
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		BUG();
		return;
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		BUG();
		return;
	}
	pte = pte_offset_kernel(pmd, vaddr);
	if (pgprot_val(flags))
		set_pte_present(&init_mm, vaddr, pte, pfn_pte(pfn, flags));
	else
		pte_clear(&init_mm, vaddr, pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

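/*
 * Usage sketch (added for illustration, not part of the original file):
 * __set_fixmap() below is the usual caller.  Something along the lines of
 *
 *	set_pte_pfn(fix_to_virt(FIX_APIC_BASE), phys >> PAGE_SHIFT,
 *		    PAGE_KERNEL_NOCACHE);
 *
 * installs a single 4K kernel mapping and flushes only that TLB entry.
 * FIX_APIC_BASE is just an example fixmap slot (CONFIG_X86_LOCAL_APIC).
 */
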
/*
 * Associate a large virtual page frame with a given physical page frame
 * and protection flags for that frame. pfn is for the base of the page,
 * vaddr is what the page gets mapped to - both must be properly aligned.
 * The pmd must already be instantiated. Assumes PAE mode.
 */
void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	if (vaddr & (PMD_SIZE-1)) {		/* vaddr is misaligned */
		printk(KERN_WARNING "set_pmd_pfn: vaddr misaligned\n");
		return;
	}
	if (pfn & (PTRS_PER_PTE-1)) {		/* pfn is misaligned */
		printk(KERN_WARNING "set_pmd_pfn: pfn misaligned\n");
		return;
	}
	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		printk(KERN_WARNING "set_pmd_pfn: pgd_none\n");
		return;
	}
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	set_pmd(pmd, pfn_pmd(pfn, flags));

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

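/*
 * Alignment sketch (added for illustration): with PAE a pmd entry maps
 * PMD_SIZE = 2MB, so both vaddr and pfn must be large-page aligned, e.g.
 *
 *	set_pmd_pfn(0xffc00000, pfn, PAGE_KERNEL_LARGE);
 *
 * where pfn is a multiple of PTRS_PER_PTE (512 with PAE); misaligned
 * arguments are rejected with the warnings above.
 */
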
static int fixmaps;
unsigned long __FIXADDR_TOP = 0xfffff000;
EXPORT_SYMBOL(__FIXADDR_TOP);

void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
	fixmaps++;
}

/**
 * reserve_top_address - reserves a hole in the top of kernel address space
 * @reserve - size of hole to reserve
 *
 * Can be used to relocate the fixmap area and poke a hole in the top
 * of kernel address space to make room for a hypervisor.
 */
void reserve_top_address(unsigned long reserve)
{
	BUG_ON(fixmaps > 0);
	printk(KERN_INFO "Reserving virtual address space above 0x%08x\n",
	       (int)-reserve);
	__FIXADDR_TOP = -reserve - PAGE_SIZE;
	__VMALLOC_RESERVE += reserve;
}

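/*
 * Usage sketch (added for illustration): a hypervisor port could carve out,
 * say, 64MB at the top of the address space early in boot, before any
 * fixmap entries have been set up:
 *
 *	reserve_top_address(64 * 1024 * 1024);
 *
 * which pushes __FIXADDR_TOP (and the whole fixmap) down and grows
 * __VMALLOC_RESERVE by the same amount.
 */
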
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	return (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
}

pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *pte;

#ifdef CONFIG_HIGHPTE
	pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
#else
	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
#endif
	if (pte)
		pgtable_page_ctor(pte);
	return pte;
}

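/*
 * Note (added): with CONFIG_HIGHPTE the pte page handed back above may live
 * in highmem, so users are expected to reach it through pte_offset_map()/
 * pte_unmap() (kmap_atomic) rather than assuming a permanent kernel mapping.
 */
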
/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 */
static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_add(&page->lru, &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_del(&page->lru);
}

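/*
 * Iteration sketch (added for illustration): a consumer such as pageattr.c
 * walks this list under pgd_lock roughly like
 *
 *	struct page *page;
 *	list_for_each_entry(page, &pgd_list, lru) {
 *		pgd_t *pgd = (pgd_t *)page_address(page);
 *		// propagate the kernel-mapping change into *pgd
 *	}
 *
 * so every non-shared pgd picks up kernel mapping changes eagerly.
 */
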
#define UNSHARED_PTRS_PER_PGD				\
	(SHARED_KERNEL_PMD ? USER_PTRS_PER_PGD : PTRS_PER_PGD)

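/*
 * Note (added): when the kernel pmd is shared, only the user part of the
 * pgd (USER_PTRS_PER_PGD entries) needs per-pgd pmd handling; otherwise
 * every entry does (PTRS_PER_PGD, i.e. 4 under PAE).
 */
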
static void pgd_ctor(void *p)
{
	pgd_t *pgd = p;
	unsigned long flags;

	/* Clear usermode parts of PGD */
	memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));

	spin_lock_irqsave(&pgd_lock, flags);

	/* If the pgd points to a shared pagetable level (either the
	   ptes in non-PAE, or shared PMD in PAE), then just copy the
	   references from swapper_pg_dir. */
	if (PAGETABLE_LEVELS == 2 ||
	    (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD)) {
		clone_pgd_range(pgd + USER_PTRS_PER_PGD,
				swapper_pg_dir + USER_PTRS_PER_PGD,
				KERNEL_PGD_PTRS);
		paravirt_alloc_pd_clone(__pa(pgd) >> PAGE_SHIFT,
					__pa(swapper_pg_dir) >> PAGE_SHIFT,
					USER_PTRS_PER_PGD,
					KERNEL_PGD_PTRS);
	}

	/* list required to sync kernel mapping updates */
	if (!SHARED_KERNEL_PMD)
		pgd_list_add(pgd);

	spin_unlock_irqrestore(&pgd_lock, flags);
}

static void pgd_dtor(void *pgd)
{
	unsigned long flags; /* can be called from interrupt context */

	if (SHARED_KERNEL_PMD)
		return;

	spin_lock_irqsave(&pgd_lock, flags);
	pgd_list_del(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}

#ifdef CONFIG_X86_PAE
/*
 * Mop up any pmd pages which may still be attached to the pgd.
 * Normally they will be freed by munmap/exit_mmap, but any pmd we
 * preallocate which never got a corresponding vma will need to be
 * freed manually.
 */
static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
	int i;

	for (i = 0; i < UNSHARED_PTRS_PER_PGD; i++) {
		pgd_t pgd = pgdp[i];

		if (pgd_val(pgd) != 0) {
			pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

			pgdp[i] = native_make_pgd(0);

			paravirt_release_pd(pgd_val(pgd) >> PAGE_SHIFT);
			pmd_free(mm, pmd);
		}
	}
}

/*
 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
 * updating the top-level pagetable entries to guarantee the
 * processor notices the update.  Since this is expensive, and
 * all 4 top-level entries are used almost immediately in a
 * new process's life, we just pre-populate them here.
 *
 * Also, if we're in a paravirt environment where the kernel pmd is
 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
 * and initialize the kernel pmds here.
 */
static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
{
	pud_t *pud;
	unsigned long addr;
	int i;

	pud = pud_offset(pgd, 0);
	for (addr = i = 0; i < UNSHARED_PTRS_PER_PGD;
	     i++, pud++, addr += PUD_SIZE) {
		pmd_t *pmd = pmd_alloc_one(mm, addr);

		if (!pmd) {
			pgd_mop_up_pmds(mm, pgd);
			return 0;
		}

		if (i >= USER_PTRS_PER_PGD)
			memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
			       sizeof(pmd_t) * PTRS_PER_PMD);

		pud_populate(mm, pud, pmd);
	}

	return 1;
}

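/*
 * Concrete numbers (added, 32-bit PAE): PTRS_PER_PGD is 4 and each top-level
 * entry spans PUD_SIZE = 1GB, so the loop above allocates at most four pmd
 * pages (512 entries of 2MB each) per new pagetable.
 */
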
#else  /* !CONFIG_X86_PAE */
/* No need to prepopulate any pagetable entries in non-PAE modes. */
static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
{
	return 1;
}

static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
}
#endif	/* CONFIG_X86_PAE */

pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);

	/* so that alloc_pd can use it */
	mm->pgd = pgd;
	if (pgd)
		pgd_ctor(pgd);

	if (pgd && !pgd_prepopulate_pmd(mm, pgd)) {
		pgd_dtor(pgd);
		free_page((unsigned long)pgd);
		pgd = NULL;
	}

	return pgd;
}

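/*
 * Note (added): pgd_alloc()/pgd_free() are reached from the generic mm
 * setup and teardown paths (mm_init() and __mmdrop() via mm_alloc_pgd()/
 * mm_free_pgd() in this kernel era), so every new mm gets a prepopulated
 * pgd and its pmds are mopped up again when the mm goes away.
 */
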
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	pgd_mop_up_pmds(mm, pgd);
	pgd_dtor(pgd);
	free_page((unsigned long)pgd);
}

void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
	pgtable_page_dtor(pte);
	paravirt_release_pt(page_to_pfn(pte));
	tlb_remove_page(tlb, pte);
}

#ifdef CONFIG_X86_PAE

void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
	paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT);
	tlb_remove_page(tlb, virt_to_page(pmd));
}

#endif

/*
 * Transitional sanity check: warn once if the two variants of the pmd_bad()
 * test ever disagree, and keep returning the v1 result.
 */
int pmd_bad(pmd_t pmd)
{
	WARN_ON_ONCE(pmd_bad_v1(pmd) != pmd_bad_v2(pmd));

	return pmd_bad_v1(pmd);
}