/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for valuable feedback.
 */
#include <linux/config.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
static DEFINE_SPINLOCK(cpa_lock);
static struct list_head df_list = LIST_HEAD_INIT(df_list);
/*
 * Look up the kernel-mapping pte for a virtual address. Returns NULL if
 * no mapping exists; for a large (PSE) mapping the pmd entry itself is
 * returned, cast to a pte.
 */
pte_t *lookup_address(unsigned long address)
{
        pgd_t *pgd = pgd_offset_k(address);
        pud_t *pud;
        pmd_t *pmd;

        if (pgd_none(*pgd))
                return NULL;
        pud = pud_offset(pgd, address);
        if (pud_none(*pud))
                return NULL;
        pmd = pmd_offset(pud, address);
        if (pmd_none(*pmd))
                return NULL;
        if (pmd_large(*pmd))
                return (pte_t *)pmd;
        return pte_offset_kernel(pmd, address);
}
static struct page *split_large_page(unsigned long address, pgprot_t prot,
                                     pgprot_t ref_prot)
{
        int i;
        unsigned long addr;
        struct page *base;
        pte_t *pbase;

        /* Drop the lock around the allocation: alloc_pages() may sleep. */
        spin_unlock_irq(&cpa_lock);
        base = alloc_pages(GFP_KERNEL, 0);
        spin_lock_irq(&cpa_lock);
        if (!base)
                return NULL;

        /*
         * page_private is used to track the number of entries in
         * the page table page that have non-standard attributes.
         */
        SetPagePrivate(base);
        page_private(base) = 0;

        address = __pa(address);
        addr = address & LARGE_PAGE_MASK;
        pbase = (pte_t *)page_address(base);
        for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
                set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT,
                                           addr == address ? prot : ref_prot));
        }
        return base;
}
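/*
 * Illustrative example (the address is hypothetical): splitting the 4MB
 * mapping covering 0xc0400000 replaces one PSE pmd entry with a page
 * table of PTRS_PER_PTE 4KB entries, all carrying ref_prot except the
 * single entry for the page whose attributes are actually being changed,
 * which gets prot.
 */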
static void flush_kernel_map(void *dummy)
{
        /* Could use CLFLUSH here if the CPU supports it (Hammer, P4) */
        if (boot_cpu_data.x86_model >= 4)
                wbinvd();
        /*
         * Flush everything to work around an erratum in early Athlons
         * regarding large-page flushing.
         */
        __flush_tlb_all();
}
static void set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
        struct page *page;
        unsigned long flags;

        set_pte_atomic(kpte, pte);      /* change init_mm */
        if (PTRS_PER_PMD > 1)
                return;

        /*
         * Without PAE the pmd level is folded into the pgd, so every
         * process pgd carries its own copy of the kernel mappings and
         * each one must be updated.
         */
        spin_lock_irqsave(&pgd_lock, flags);
        for (page = pgd_list; page; page = (struct page *)page->index) {
                pgd_t *pgd;
                pud_t *pud;
                pmd_t *pmd;

                pgd = (pgd_t *)page_address(page) + pgd_index(address);
                pud = pud_offset(pgd, address);
                pmd = pmd_offset(pud, address);
                set_pte_atomic((pte_t *)pmd, pte);
        }
        spin_unlock_irqrestore(&pgd_lock, flags);
}
/*
 * No more special protections in this 2/4MB area - revert to a
 * large page again.
 */
static inline void revert_page(struct page *kpte_page, unsigned long address)
{
        pgprot_t ref_prot;
        pte_t *linear;

        ref_prot =
        ((address & LARGE_PAGE_MASK) < (unsigned long)&_etext)
                ? PAGE_KERNEL_LARGE_EXEC : PAGE_KERNEL_LARGE;

        linear = (pte_t *)
                pmd_offset(pud_offset(pgd_offset_k(address), address), address);
        set_pmd_pte(linear, address,
                    pfn_pte((__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT,
                            ref_prot));
}
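/*
 * Lifecycle sketch: split_large_page() creates the 4KB page table,
 * __change_page_attr() tracks the number of non-default entries in
 * page_private(), and once that count drops back to zero the page table
 * is queued on df_list and revert_page() reinstates the large mapping;
 * global_flush_tlb() frees the queued pages after the TLBs have been
 * flushed.
 */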
static int
__change_page_attr(struct page *page, pgprot_t prot)
{
        pte_t *kpte;
        unsigned long address;
        struct page *kpte_page;

        BUG_ON(PageHighMem(page));
        address = (unsigned long)page_address(page);

        kpte = lookup_address(address);
        if (!kpte)
                return -EINVAL;
        kpte_page = virt_to_page(kpte);
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) {
                if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
                        set_pte_atomic(kpte, mk_pte(page, prot));
                } else {
                        pgprot_t ref_prot;
                        struct page *split;

                        ref_prot =
                        ((address & LARGE_PAGE_MASK) < (unsigned long)&_etext)
                                ? PAGE_KERNEL_EXEC : PAGE_KERNEL;
                        split = split_large_page(address, prot, ref_prot);
                        if (!split)
                                return -ENOMEM;
                        set_pmd_pte(kpte, address, mk_pte(split, ref_prot));
                        kpte_page = split;
                }
                page_private(kpte_page)++;
        } else if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
                set_pte_atomic(kpte, mk_pte(page, PAGE_KERNEL));
                BUG_ON(page_private(kpte_page) == 0);
                page_private(kpte_page)--;
        } else
                BUG();

        /*
         * If the pte was reserved, it means it was created at boot
         * time (not via split_large_page) and in turn we must not
         * replace it with a large page.
         */
        if (!PageReserved(kpte_page)) {
                if (cpu_has_pse && (page_private(kpte_page) == 0)) {
                        ClearPagePrivate(kpte_page);
                        list_add(&kpte_page->lru, &df_list);
                        revert_page(kpte_page, address);
                }
        }
        return 0;
}
static inline void flush_map(void)
{
        on_each_cpu(flush_kernel_map, NULL, 1, 1);
}
/*
 * Change the page attributes of a page in the kernel linear mapping.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes of the
 * kernel linear mapping too.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere.
 * This function only deals with the kernel linear map.
 *
 * The caller must call global_flush_tlb() after this.
 */
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
        int err = 0;
        int i;
        unsigned long flags;

        spin_lock_irqsave(&cpa_lock, flags);
        for (i = 0; i < numpages; i++, page++) {
                err = __change_page_attr(page, prot);
                if (err)
                        break;
        }
        spin_unlock_irqrestore(&cpa_lock, flags);
        return err;
}
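/*
 * Usage sketch (illustrative only): a caller that needs a page mapped
 * uncached might do
 *
 *      if (change_page_attr(page, 1, PAGE_KERNEL_NOCACHE) == 0)
 *              global_flush_tlb();
 *
 * and, when the special mapping is no longer needed, restore the default
 *
 *      change_page_attr(page, 1, PAGE_KERNEL);
 *      global_flush_tlb();
 */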
void global_flush_tlb(void)
{
        struct list_head l;
        struct page *pg, *next;

        BUG_ON(irqs_disabled());

        spin_lock_irq(&cpa_lock);
        list_splice_init(&df_list, &l);
        spin_unlock_irq(&cpa_lock);
        flush_map();
        list_for_each_entry_safe(pg, next, &l, lru)
                __free_page(pg);
}
#ifdef CONFIG_DEBUG_PAGEALLOC
void kernel_map_pages(struct page *page, int numpages, int enable)
{
        if (PageHighMem(page))
                return;
        if (!enable)
                mutex_debug_check_no_locks_freed(page_address(page),
                                                 numpages * PAGE_SIZE);

        /*
         * The return value is ignored - the calls cannot fail, since
         * large pages are disabled at boot time (so there is nothing
         * to split).
         */
        change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));
        /*
         * We should perform an IPI and flush all TLBs, but that can
         * deadlock - so flush only the current CPU's TLB.
         */
        __flush_tlb_all();
}
#endif
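/*
 * Illustrative effect of CONFIG_DEBUG_PAGEALLOC (the alloc/free call
 * sites live in the page allocator, not shown here): pages are unmapped
 * from the kernel linear mapping while free, so a stale pointer
 * dereference faults immediately instead of silently reading or
 * corrupting freed memory.
 */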
EXPORT_SYMBOL(change_page_attr);
EXPORT_SYMBOL(global_flush_tlb);