/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/config.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
/* Serializes all attribute changes; also protects df_list. */
static DEFINE_SPINLOCK(cpa_lock);
/* pte pages made redundant by reverted large pages; freed in global_flush_tlb(). */
static struct list_head df_list = LIST_HEAD_INIT(df_list);
/*
 * Walk the kernel page tables for a lowmem address. Returns the pte,
 * the pmd cast to a pte for a large page, or NULL if unmapped.
 */
pte_t *lookup_address(unsigned long address)
{
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;
	if (pmd_large(*pmd))
		return (pte_t *)pmd;
	return pte_offset_kernel(pmd, address);
}
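/*
 * Illustrative sketch (not part of the original file): probing the current
 * protection of a kernel address with lookup_address(). The helper name is
 * hypothetical. For a large page the returned pointer is really a pmd
 * entry, but the low flag bits line up, so the test still works.
 */
static inline int example_addr_is_writable(unsigned long addr)
{
	pte_t *pte = lookup_address(addr);

	return pte && (pte_val(*pte) & _PAGE_RW);
}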
static struct page *split_large_page(unsigned long address, pgprot_t prot)
{
	int i;
	unsigned long addr;
	struct page *base;
	pte_t *pbase;

	/* Drop the lock around the allocation; alloc_pages() may sleep. */
	spin_unlock_irq(&cpa_lock);
	base = alloc_pages(GFP_KERNEL, 0);
	spin_lock_irq(&cpa_lock);
	if (!base)
		return NULL;

	address = __pa(address);
	addr = address & LARGE_PAGE_MASK;
	pbase = (pte_t *)page_address(base);
	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
		pbase[i] = pfn_pte(addr >> PAGE_SHIFT,
				   addr == address ? prot : PAGE_KERNEL);
	}
	return base;
}
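/*
 * Worked example (illustrative, not in the original file): with 4MB large
 * pages (non-PAE, LARGE_PAGE_MASK == ~0x3fffff) and a target physical
 * address of 0x00611000, addr starts at 0x00400000 and the loop fills all
 * PTRS_PER_PTE == 1024 entries: 0x00400000, 0x00401000, ..., 0x007ff000.
 * Only the entry for 0x00611000 (index 0x211) gets "prot"; the other 1023
 * keep PAGE_KERNEL, so the split is invisible except for the one page.
 */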
static void flush_kernel_map(void *dummy)
{
	/* Could use CLFLUSH here if the CPU supports it (Hammer, P4) */
	if (boot_cpu_data.x86_model >= 4)
		asm volatile("wbinvd":::"memory");
	/*
	 * Flush all TLBs to work around errata in early Athlons
	 * regarding large page flushing.
	 */
	__flush_tlb_all();
}
static void set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
	struct page *page;
	unsigned long flags;

	set_pte_atomic(kpte, pte);	/* change init_mm */
	if (PTRS_PER_PMD > 1)		/* PAE: pgds share the kernel pmds */
		return;

	/* !PAE: propagate the change into every pgd's kernel mapping. */
	spin_lock_irqsave(&pgd_lock, flags);
	for (page = pgd_list; page; page = (struct page *)page->index) {
		pgd_t *pgd;
		pud_t *pud;
		pmd_t *pmd;
		pgd = (pgd_t *)page_address(page) + pgd_index(address);
		pud = pud_offset(pgd, address);
		pmd = pmd_offset(pud, address);
		set_pte_atomic((pte_t *)pmd, pte);
	}
	spin_unlock_irqrestore(&pgd_lock, flags);
}
/*
 * No more special protections in this 2/4MB area - revert to a
 * large page again.
 */
static inline void revert_page(struct page *kpte_page, unsigned long address)
{
	pte_t *linear = (pte_t *)
		pmd_offset(pud_offset(pgd_offset_k(address), address), address);
	set_pmd_pte(linear, address,
		    pfn_pte((__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT,
			    PAGE_KERNEL_LARGE));
}
static int
__change_page_attr(struct page *page, pgprot_t prot)
{
	pte_t *kpte;
	unsigned long address;
	struct page *kpte_page;

	BUG_ON(PageHighMem(page));
	address = (unsigned long)page_address(page);

	kpte = lookup_address(address);
	if (!kpte)
		return -EINVAL;
	kpte_page = virt_to_page(kpte);
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) {
		if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
			set_pte_atomic(kpte, mk_pte(page, prot));
		} else {
			struct page *split = split_large_page(address, prot);
			if (!split)
				return -ENOMEM;
			set_pmd_pte(kpte, address, mk_pte(split, PAGE_KERNEL));
			kpte_page = split;
		}
		get_page(kpte_page);
	} else if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
		set_pte_atomic(kpte, mk_pte(page, PAGE_KERNEL));
		__put_page(kpte_page);
	} else
		BUG();

	/*
	 * If the pte was reserved, it means it was created at boot
	 * time (not via split_large_page) and in turn we must not
	 * replace it with a largepage.
	 */
	if (!PageReserved(kpte_page)) {
		/* memleak and potential failed 2M page regeneration */
		BUG_ON(!page_count(kpte_page));

		/*
		 * The count drops back to 1 once no 4k pte in this
		 * 2/4MB area carries a non-default protection anymore,
		 * so the area can become a single large page again.
		 */
		if (cpu_has_pse && (page_count(kpte_page) == 1)) {
			list_add(&kpte_page->lru, &df_list);
			revert_page(kpte_page, address);
		}
	}
	return 0;
}
static inline void flush_map(void)
{
	on_each_cpu(flush_kernel_map, NULL, 1, 1);
}
/*
 * Change the page attributes of a page in the kernel linear mapping.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes of the
 * kernel linear mapping too.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere.
 * This function only deals with the kernel linear map.
 *
 * Caller must call global_flush_tlb() after this.
 */
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
	int err = 0;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&cpa_lock, flags);
	for (i = 0; i < numpages; i++, page++) {
		err = __change_page_attr(page, prot);
		if (err)
			break;
	}
	spin_unlock_irqrestore(&cpa_lock, flags);
	return err;
}
void global_flush_tlb(void)
{
	LIST_HEAD(l);
	struct page *pg, *next;

	BUG_ON(irqs_disabled());

	spin_lock_irq(&cpa_lock);
	list_splice_init(&df_list, &l);
	spin_unlock_irq(&cpa_lock);
	flush_map();
	/* Free the pte pages made redundant by reverted large pages. */
	list_for_each_entry_safe(pg, next, &l, lru)
		__free_page(pg);
}
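/*
 * Illustrative sketch (not part of the original file): the intended calling
 * pattern. A driver that wants a lowmem buffer mapped uncached changes the
 * attribute, then issues the mandatory global flush. The helper name and
 * "buf" are hypothetical; PAGE_KERNEL_NOCACHE is the stock i386 protection
 * for uncached kernel mappings.
 */
static inline int example_set_uncached(struct page *buf, int npages)
{
	int err = change_page_attr(buf, npages, PAGE_KERNEL_NOCACHE);

	if (err)
		return err;
	/* Required: flush stale TLB/cache entries on all CPUs. */
	global_flush_tlb();
	return 0;
}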
#ifdef CONFIG_DEBUG_PAGEALLOC
void kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (PageHighMem(page))
		return;
	/*
	 * The return value is ignored - the calls cannot fail,
	 * large pages are disabled at boot time.
	 */
	change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));
	/*
	 * We should perform an IPI and flush all TLBs,
	 * but that can deadlock -> flush only the current CPU.
	 */
	__flush_tlb_all();
}
#endif
EXPORT_SYMBOL(change_page_attr);
EXPORT_SYMBOL(global_flush_tlb);