#include <linux/mm.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	return (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
}
pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *pte;

#ifdef CONFIG_HIGHPTE
	pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
#else
	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
#endif
	if (pte)
		pgtable_page_ctor(pte);
	return pte;
}
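/*
 * Clarifying note (an addition, not from the original file): with
 * CONFIG_HIGHPTE the user pte page above may come from highmem, so it is
 * normally mapped with kmap_atomic (via pte_offset_map) before the ptes
 * are touched. pte_alloc_one_kernel() deliberately omits __GFP_HIGHMEM,
 * since kernel pagetables are accessed through their lowmem address.
 */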
void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
	pgtable_page_dtor(pte);
	paravirt_release_pte(page_to_pfn(pte));
	tlb_remove_page(tlb, pte);
}
#if PAGETABLE_LEVELS > 2
void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
	paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
	tlb_remove_page(tlb, virt_to_page(pmd));
}
#if PAGETABLE_LEVELS > 3
void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
{
	paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
	tlb_remove_page(tlb, virt_to_page(pud));
}
#endif	/* PAGETABLE_LEVELS > 3 */
#endif	/* PAGETABLE_LEVELS > 2 */
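/*
 * For reference: on x86, PAGETABLE_LEVELS is 2 for classic 32-bit paging,
 * 3 for 32-bit PAE and 4 for 64-bit long mode, so the pmd/pud helpers
 * above are only compiled in when those levels actually exist.
 */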
static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_add(&page->lru, &pgd_list);
}
static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_del(&page->lru);
}
#define UNSHARED_PTRS_PER_PGD				\
	(SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
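/*
 * Worked example (assuming the usual 32-bit PAE layout with a 3G/1G
 * split, i.e. PTRS_PER_PGD == 4 and KERNEL_PGD_BOUNDARY == 3): when
 * SHARED_KERNEL_PMD is set, the kernel pmd is common to all pagetables,
 * so only the 3 user entries are private and UNSHARED_PTRS_PER_PGD == 3;
 * when it is clear (e.g. under Xen), every entry is private and the
 * value is 4.
 */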
static void pgd_ctor(void *p)
{
	pgd_t *pgd = p;
	unsigned long flags;

	/* Clear usermode parts of PGD */
	memset(pgd, 0, KERNEL_PGD_BOUNDARY*sizeof(pgd_t));

	spin_lock_irqsave(&pgd_lock, flags);

	/* If the pgd points to a shared pagetable level (either the
	   ptes in non-PAE, or shared PMD in PAE), then just copy the
	   references from swapper_pg_dir. */
	if (PAGETABLE_LEVELS == 2 ||
	    (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD) ||
	    PAGETABLE_LEVELS == 4) {
		clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
				swapper_pg_dir + KERNEL_PGD_BOUNDARY,
				KERNEL_PGD_PTRS);
		paravirt_alloc_pmd_clone(__pa(pgd) >> PAGE_SHIFT,
					 __pa(swapper_pg_dir) >> PAGE_SHIFT,
					 KERNEL_PGD_BOUNDARY,
					 KERNEL_PGD_PTRS);
	}

	/* list required to sync kernel mapping updates */
	if (!SHARED_KERNEL_PMD)
		pgd_list_add(pgd);

	spin_unlock_irqrestore(&pgd_lock, flags);
}
static void pgd_dtor(void *pgd)
{
	unsigned long flags; /* can be called from interrupt context */

	if (SHARED_KERNEL_PMD)
		return;

	spin_lock_irqsave(&pgd_lock, flags);
	pgd_list_del(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}
/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 */
#ifdef CONFIG_X86_PAE
/*
 * Mop up any pmd pages which may still be attached to the pgd.
 * Normally they will be freed by munmap/exit_mmap, but any pmd we
 * preallocate which never got a corresponding vma will need to be
 * freed manually.
 */
static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
	int i;

	for(i = 0; i < UNSHARED_PTRS_PER_PGD; i++) {
		pgd_t pgd = pgdp[i];

		if (pgd_val(pgd) != 0) {
			pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

			pgdp[i] = native_make_pgd(0);

			paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
			pmd_free(mm, pmd);
		}
	}
}
/*
 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
 * updating the top-level pagetable entries to guarantee the
 * processor notices the update. Since this is expensive, and
 * all 4 top-level entries are used almost immediately in a
 * new process's life, we just pre-populate them here.
 *
 * Also, if we're in a paravirt environment where the kernel pmd is
 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
 * and initialize the kernel pmds here.
 */
static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
{
	pud_t *pud;
	unsigned long addr;
	int i;

	pud = pud_offset(pgd, 0);
	for (addr = i = 0; i < UNSHARED_PTRS_PER_PGD;
	     i++, pud++, addr += PUD_SIZE) {
		pmd_t *pmd = pmd_alloc_one(mm, addr);

		if (!pmd) {
			pgd_mop_up_pmds(mm, pgd);
			return 0;
		}

		if (i >= KERNEL_PGD_BOUNDARY)
			memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
			       sizeof(pmd_t) * PTRS_PER_PMD);

		pud_populate(mm, pud, pmd);
	}

	return 1;
}
void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);

	/* Note: almost everything apart from _PAGE_PRESENT is
	   reserved at the pmd (PDPT) level. */
	set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));

	/*
	 * According to Intel App note "TLBs, Paging-Structure Caches,
	 * and Their Invalidation", April 2007, document 317080-001,
	 * section 8.1: in PAE mode we explicitly have to flush the
	 * TLB via cr3 if the top-level pgd is changed...
	 */
	if (mm == current->active_mm)
		write_cr3(read_cr3());
}
#else  /* !CONFIG_X86_PAE */
/* No need to prepopulate any pagetable entries in non-PAE modes. */
static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
{
	return 1;
}
static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgd)
{
}
#endif	/* CONFIG_X86_PAE */
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);

	/* so that alloc_pmd can use it */
	mm->pgd = pgd;
	if (pgd)
		pgd_ctor(pgd);

	if (pgd && !pgd_prepopulate_pmd(mm, pgd)) {
		pgd_dtor(pgd);
		free_page((unsigned long)pgd);
		pgd = NULL;
	}

	return pgd;
}
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	pgd_mop_up_pmds(mm, pgd);
	pgd_dtor(pgd);
	free_page((unsigned long)pgd);
}
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);

	if (changed && dirty) {
		*ptep = entry;
		pte_update_defer(vma->vm_mm, address, ptep);
		flush_tlb_page(vma, address);
	}

	return changed;
}
int ptep_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pte_t *ptep)
{
	int ret = 0;

	if (pte_young(*ptep))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *) &ptep->pte);

	if (ret)
		pte_update(vma->vm_mm, addr, ptep);

	return ret;
}
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	int young;

	young = ptep_test_and_clear_young(vma, address, ptep);
	if (young)
		flush_tlb_page(vma, address);

	return young;
}