/*
 *  linux/arch/i386/mm/pgtable.c
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
void show_mem(void)
{
	int total = 0, reserved = 0;
	int shared = 0, cached = 0;
	int highmem = 0;
	struct page *page;
	pg_data_t *pgdat;
	unsigned long i;
	unsigned long flags;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas();
	printk(KERN_INFO "Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
	for_each_online_pgdat(pgdat) {
		pgdat_resize_lock(pgdat, &flags);
		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
			/* Walking every page can take a while; keep the
			 * NMI watchdog quiet once per MAX_ORDER block. */
			if (unlikely(i % MAX_ORDER_NR_PAGES == 0))
				touch_nmi_watchdog();
			page = pgdat_page_nr(pgdat, i);
			total++;
			if (PageHighMem(page))
				highmem++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
		pgdat_resize_unlock(pgdat, &flags);
	}
	printk(KERN_INFO "%d pages of RAM\n", total);
	printk(KERN_INFO "%d pages of HIGHMEM\n", highmem);
	printk(KERN_INFO "%d reserved pages\n", reserved);
	printk(KERN_INFO "%d pages shared\n", shared);
	printk(KERN_INFO "%d pages swap cached\n", cached);

	printk(KERN_INFO "%lu pages dirty\n", global_page_state(NR_FILE_DIRTY));
	printk(KERN_INFO "%lu pages writeback\n",
		global_page_state(NR_WRITEBACK));
	printk(KERN_INFO "%lu pages mapped\n", global_page_state(NR_FILE_MAPPED));
	printk(KERN_INFO "%lu pages slab\n",
		global_page_state(NR_SLAB_RECLAIMABLE) +
		global_page_state(NR_SLAB_UNRECLAIMABLE));
	printk(KERN_INFO "%lu pages pagetables\n",
		global_page_state(NR_PAGETABLE));
}
/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	/* The kernel page tables must already be populated down to the
	 * pte level for vaddr; this helper only installs the leaf entry. */
	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		BUG();
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		BUG();
		return;
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		BUG();
		return;
	}
	pte = pte_offset_kernel(pmd, vaddr);
	if (pgprot_val(flags))
		set_pte_present(&init_mm, vaddr, pte, pfn_pte(pfn, flags));
	else
		pte_clear(&init_mm, vaddr, pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}
/*
 * Associate a large virtual page frame with a given physical page frame
 * and protection flags for that frame. pfn is for the base of the page,
 * vaddr is what the page gets mapped to - both must be properly aligned.
 * The pmd must already be instantiated. Assumes PAE mode.
 */
void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	if (vaddr & (PMD_SIZE-1)) {		/* vaddr is misaligned */
		printk(KERN_WARNING "set_pmd_pfn: vaddr misaligned\n");
		return;
	}
	if (pfn & (PTRS_PER_PTE-1)) {		/* pfn is misaligned */
		printk(KERN_WARNING "set_pmd_pfn: pfn misaligned\n");
		return;
	}
	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		printk(KERN_WARNING "set_pmd_pfn: pgd_none\n");
		return;
	}
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	set_pmd(pmd, pfn_pmd(pfn, flags));
	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}
unsigned long __FIXADDR_TOP = 0xfffff000;
EXPORT_SYMBOL(__FIXADDR_TOP);

void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
}
/**
 * reserve_top_address - reserves a hole in the top of kernel address space
 * @reserve - size of hole to reserve
 *
 * Can be used to relocate the fixmap area and poke a hole in the top
 * of kernel address space to make room for a hypervisor.
 */
void reserve_top_address(unsigned long reserve)
{
	printk(KERN_INFO "Reserving virtual address space above 0x%08x\n",
	       (int)-reserve);
	__FIXADDR_TOP = -reserve - PAGE_SIZE;
	__VMALLOC_RESERVE += reserve;
}
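/*
 * A minimal sketch (illustration only) of the intended use: a hypothetical
 * hypervisor probe carving out a 16MB hole at the top of the address space
 * during early boot, before any fixmap entries exist.  The function name
 * and the 16MB figure are assumptions made for this example.
 */
#if 0
static void __init example_hypervisor_reserve(void)
{
	/* Moves __FIXADDR_TOP (and hence the fixmap) down by 16MB plus one
	 * guard page, and grows __VMALLOC_RESERVE by the same 16MB. */
	reserve_top_address(16 * 1024 * 1024);
}
#endif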
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	return (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
}

struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *pte;

#ifdef CONFIG_HIGHPTE
	pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
#else
	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
#endif
	return pte;
}
/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 */
static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_add(&page->lru, &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_del(&page->lru);
}
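/*
 * A rough sketch (illustration only) of what a consumer of pgd_list, such
 * as pageattr.c, conceptually does with it: walk every registered pgd and
 * rewrite the kernel entry for one address so cached and uncached pgds stay
 * in sync.  The helper name and the exact locking shown are assumptions.
 */
#if 0
static void example_sync_kernel_pgd_entry(unsigned long address, pgd_t entry)
{
	struct page *page;
	unsigned long flags;

	spin_lock_irqsave(&pgd_lock, flags);
	list_for_each_entry(page, &pgd_list, lru) {
		pgd_t *pgd = (pgd_t *)page_address(page);

		pgd[pgd_index(address)] = entry;
	}
	spin_unlock_irqrestore(&pgd_lock, flags);
}
#endif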
#define UNSHARED_PTRS_PER_PGD				\
	(SHARED_KERNEL_PMD ? USER_PTRS_PER_PGD : PTRS_PER_PGD)
static void pgd_ctor(void *p)
{
	pgd_t *pgd = p;
	unsigned long flags;

	/* Clear usermode parts of PGD */
	memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));

	spin_lock_irqsave(&pgd_lock, flags);

	/* If the pgd points to a shared pagetable level (either the
	   ptes in non-PAE, or shared PMD in PAE), then just copy the
	   references from swapper_pg_dir. */
	if (PAGETABLE_LEVELS == 2 ||
	    (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD)) {
		clone_pgd_range(pgd + USER_PTRS_PER_PGD,
				swapper_pg_dir + USER_PTRS_PER_PGD,
				KERNEL_PGD_PTRS);
		paravirt_alloc_pd_clone(__pa(pgd) >> PAGE_SHIFT,
					__pa(swapper_pg_dir) >> PAGE_SHIFT,
					USER_PTRS_PER_PGD,
					KERNEL_PGD_PTRS);
	}

	/* list required to sync kernel mapping updates */
	if (!SHARED_KERNEL_PMD)
		pgd_list_add(pgd);

	spin_unlock_irqrestore(&pgd_lock, flags);
}
static void pgd_dtor(void *pgd)
{
	unsigned long flags; /* can be called from interrupt context */

	if (SHARED_KERNEL_PMD)
		return;

	spin_lock_irqsave(&pgd_lock, flags);
	pgd_list_del(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}
#ifdef CONFIG_X86_PAE
/*
 * Mop up any pmd pages which may still be attached to the pgd.
 * Normally they will be freed by munmap/exit_mmap, but any pmd we
 * preallocate which never got a corresponding vma will need to be
 * freed manually.
 */
static void pgd_mop_up_pmds(pgd_t *pgdp)
{
	int i;

	for(i = 0; i < UNSHARED_PTRS_PER_PGD; i++) {
		pgd_t pgd = pgdp[i];

		if (pgd_val(pgd) != 0) {
			pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

			/* Detach the pmd from the pgd before freeing it. */
			pgdp[i] = native_make_pgd(0);

			paravirt_release_pd(pgd_val(pgd) >> PAGE_SHIFT);
			pmd_free(pmd);
		}
	}
}
/*
 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
 * updating the top-level pagetable entries to guarantee the
 * processor notices the update.  Since this is expensive, and
 * all 4 top-level entries are used almost immediately in a
 * new process's life, we just pre-populate them here.
 *
 * Also, if we're in a paravirt environment where the kernel pmd is
 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
 * and initialize the kernel pmds here.
 */
static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
{
	pud_t *pud;
	unsigned long addr;
	int i;

	pud = pud_offset(pgd, 0);
	for (addr = i = 0; i < UNSHARED_PTRS_PER_PGD;
	     i++, pud++, addr += PUD_SIZE) {
		pmd_t *pmd = pmd_alloc_one(mm, addr);

		if (!pmd) {
			/* Allocation failed: undo what we did so far. */
			pgd_mop_up_pmds(pgd);
			return 0;
		}

		if (i >= USER_PTRS_PER_PGD)
			memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
			       sizeof(pmd_t) * PTRS_PER_PMD);

		pud_populate(mm, pud, pmd);
	}

	return 1;
}
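/*
 * A minimal sketch (illustration only) of the arithmetic behind the
 * pre-population above: PAE has only PTRS_PER_PGD == 4 top-level entries,
 * each spanning PUD_SIZE == 1GB, so four pmd pages per process cover the
 * whole 4GB address space and the top-level entries never need a later,
 * cr3-reload-priced update.
 */
#if 0
static void example_pae_layout_assertions(void)
{
	BUILD_BUG_ON(PTRS_PER_PGD != 4);		/* four top-level slots */
	BUILD_BUG_ON(PUD_SIZE != (1UL << 30));		/* each maps 1GB */
}
#endif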
#else  /* !CONFIG_X86_PAE */
/* No need to prepopulate any pagetable entries in non-PAE modes. */
static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
{
	return 1;
}

static void pgd_mop_up_pmds(pgd_t *pgd)
{
}
#endif	/* CONFIG_X86_PAE */
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = quicklist_alloc(0, GFP_KERNEL, pgd_ctor);

	mm->pgd = pgd;			/* so that alloc_pd can use it */

	if (pgd && !pgd_prepopulate_pmd(mm, pgd)) {
		quicklist_free(0, pgd_dtor, pgd);
		pgd = NULL;
	}

	return pgd;
}
void pgd_free(pgd_t *pgd)
{
	pgd_mop_up_pmds(pgd);
	quicklist_free(0, pgd_dtor, pgd);
}
void check_pgt_cache(void)
{
	quicklist_trim(0, pgd_dtor, 25, 16);
}
void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
	paravirt_release_pt(page_to_pfn(pte));
	tlb_remove_page(tlb, pte);
}
#ifdef CONFIG_X86_PAE

void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
	/* This is called just after the pmd has been detached from
	   the pgd, which requires a full tlb flush to be recognized
	   by the CPU.  Rather than incurring multiple tlb flushes
	   while the address space is being pulled down, make the tlb
	   gathering machinery do a full flush when we're done. */
	tlb->fullmm = 1;

	paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT);
	tlb_remove_page(tlb, virt_to_page(pmd));
}

#endif