/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
/* Walk the kernel page tables and return the pte mapping 'address', or NULL. */
static inline pte_t *lookup_address(unsigned long address)
{
        pgd_t *pgd = pgd_offset_k(address);
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if (pgd_none(*pgd))
                return NULL;
        pud = pud_offset(pgd, address);
        if (!pud_present(*pud))
                return NULL;
        pmd = pmd_offset(pud, address);
        if (!pmd_present(*pmd))
                return NULL;
        if (pmd_large(*pmd))
                return (pte_t *)pmd;
        pte = pte_offset_kernel(pmd, address);
        if (pte && !pte_present(*pte))
                pte = NULL;
        return pte;
}
static struct page *split_large_page(unsigned long address, pgprot_t prot,
                                     pgprot_t ref_prot)
{
        int i;
        unsigned long addr;
        struct page *base = alloc_pages(GFP_KERNEL, 0);
        pte_t *pbase;

        if (!base)
                return NULL;
        /*
         * page_private is used to track the number of entries in
         * the page table page that have non-standard attributes.
         */
        SetPagePrivate(base);
        page_private(base) = 0;

        address = __pa(address);
        addr = address & LARGE_PAGE_MASK;
        pbase = (pte_t *)page_address(base);
        for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
                pbase[i] = pfn_pte(addr >> PAGE_SHIFT,
                                   addr == address ? prot : ref_prot);
        }
        return base;
}
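/*
 * Illustrative sketch (not part of the original file): the loop above
 * replaces one large mapping with 512 4k entries; only the entry whose
 * physical address matches the target gets the new protection, all
 * others get the reference protection. Below is a minimal user-space
 * model of that index arithmetic, assuming 2MB large pages and a
 * 4k-aligned target. All MODEL_*/model_* names are hypothetical.
 */
#if 0
#include <assert.h>
#include <stdint.h>

#define MODEL_PAGE_SIZE         4096ULL
#define MODEL_LARGE_SIZE        (512ULL * MODEL_PAGE_SIZE)      /* 2MB */
#define MODEL_LARGE_MASK        (~(MODEL_LARGE_SIZE - 1))

static void model_split(uint64_t target)        /* physical, 4k aligned */
{
        uint64_t addr = target & MODEL_LARGE_MASK;
        int i, special = -1;

        for (i = 0; i < 512; i++, addr += MODEL_PAGE_SIZE)
                if (addr == target)
                        special = i;    /* this entry would get 'prot' */

        /* exactly one slot inside the 2MB region is the special one */
        assert(special == (int)((target & (MODEL_LARGE_SIZE - 1)) / MODEL_PAGE_SIZE));
}
#endif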
static void flush_kernel_map(void *address)
{
        /* The clflush path is disabled by the "0 &&"; wbinvd is always used. */
        if (0 && address && cpu_has_clflush) {
                /* is this worth it? */
                int i;
                for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
                        asm volatile("clflush (%0)" :: "r" (address + i));
        } else
                asm volatile("wbinvd":::"memory");
        if (address)
                __flush_tlb_one(address);
        else
                __flush_tlb_all();
}
static inline void flush_map(unsigned long address)
{
        on_each_cpu(flush_kernel_map, (void *)address, 1, 1);
}
static struct page *deferred_pages; /* protected by init_mm.mmap_sem */
/* Chain a fully reverted page table page onto the deferred free list. */
static inline void save_page(struct page *fpage)
{
        fpage->lru.next = (struct list_head *)deferred_pages;
        deferred_pages = fpage;
}
/*
 * No more special protections in this 2/4MB area - revert to a
 * large page again.
 */
static void revert_page(unsigned long address, pgprot_t ref_prot)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t large_pte;

        pgd = pgd_offset_k(address);
        BUG_ON(pgd_none(*pgd));
        pud = pud_offset(pgd, address);
        BUG_ON(pud_none(*pud));
        pmd = pmd_offset(pud, address);
        BUG_ON(pmd_val(*pmd) & _PAGE_PSE);
        pgprot_val(ref_prot) |= _PAGE_PSE;
        large_pte = mk_pte_phys(__pa(address) & LARGE_PAGE_MASK, ref_prot);
        set_pte((pte_t *)pmd, large_pte);
}
static int
__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
                   pgprot_t ref_prot)
{
        pte_t *kpte;
        struct page *kpte_page;
        unsigned kpte_flags;
        pgprot_t ref_prot2;

        kpte = lookup_address(address);
        if (!kpte)
                return 0;
        kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
        kpte_flags = pte_val(*kpte);
        if (pgprot_val(prot) != pgprot_val(ref_prot)) {
                if ((kpte_flags & _PAGE_PSE) == 0) {
                        set_pte(kpte, pfn_pte(pfn, prot));
                } else {
                        /*
                         * split_large_page will take the reference for this
                         * change_page_attr on the split page.
                         */
                        struct page *split;

                        ref_prot2 = __pgprot(pgprot_val(pte_pgprot(*lookup_address(address)))
                                             & ~(1 << _PAGE_BIT_PSE));
                        split = split_large_page(address, prot, ref_prot2);
                        if (!split)
                                return -ENOMEM;
                        set_pte(kpte, mk_pte(split, ref_prot2));
                        kpte_page = split;
                }
                page_private(kpte_page)++;
        } else if ((kpte_flags & _PAGE_PSE) == 0) {
                set_pte(kpte, pfn_pte(pfn, ref_prot));
                BUG_ON(page_private(kpte_page) == 0);
                page_private(kpte_page)--;
        } else
                BUG();

        /* on x86-64 the direct mapping set at boot is not using 4k pages */
        BUG_ON(PageReserved(kpte_page));

        if (page_private(kpte_page) == 0) {
                save_page(kpte_page);
                revert_page(address, ref_prot);
        }
        return 0;
}
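/*
 * Illustrative sketch (not part of the original file): the page_private
 * bookkeeping above is a plain counter of 4k entries in a split page
 * table that still differ from the reference protection. A minimal
 * user-space model of the increment/decrement/revert decisions,
 * assuming one counter per large-page region; struct cpa_model and
 * model_change are hypothetical names.
 */
#if 0
struct cpa_model {
        int nonstd;                     /* models page_private(kpte_page) */
};

/* returns 1 when the region could be reverted to a single large page */
static int model_change(struct cpa_model *m, int new_is_ref, int old_is_ref)
{
        if (!new_is_ref && old_is_ref)
                m->nonstd++;            /* page_private(kpte_page)++ */
        else if (new_is_ref && !old_is_ref)
                m->nonstd--;            /* page_private(kpte_page)-- */
        return m->nonstd == 0;          /* save_page() + revert_page() */
}
#endif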
/*
 * Change the page attributes of a page in the linear mapping.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes of the
 * in-kernel linear mapping too.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere.
 * This function only deals with the kernel linear map.
 *
 * Caller must call global_flush_tlb() after this.
 */
int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
{
        int err = 0;
        int i;

        down_write(&init_mm.mmap_sem);
        for (i = 0; i < numpages; i++, address += PAGE_SIZE) {
                unsigned long pfn = __pa(address) >> PAGE_SHIFT;

                err = __change_page_attr(address, pfn, prot, PAGE_KERNEL);
                if (err)
                        break;
                /* Handle kernel mapping too which aliases part of the
                 * lowmem */
                if (__pa(address) < KERNEL_TEXT_SIZE) {
                        unsigned long addr2;
                        pgprot_t prot2 = prot;

                        addr2 = __START_KERNEL_map + __pa(address);
                        /* the kernel text mapping must stay executable */
                        pgprot_val(prot2) &= ~_PAGE_NX;
                        err = __change_page_attr(addr2, pfn, prot2, PAGE_KERNEL_EXEC);
                }
        }
        up_write(&init_mm.mmap_sem);
        return err;
}
/* Don't call this for MMIO areas that may not have a mem_map entry */
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
        unsigned long addr = (unsigned long)page_address(page);
        return change_page_attr_addr(addr, numpages, prot);
}
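/*
 * Usage sketch (not part of the original file): a caller that wants a
 * page mapped uncached in the kernel linear map would do something like
 * the following and then flush, as required by the comment above
 * change_page_attr_addr(). The example_* helpers are hypothetical, and
 * PAGE_KERNEL_NOCACHE is assumed to be provided by the arch headers.
 */
#if 0
static int example_make_uncached(struct page *page)
{
        int err = change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
        if (err)
                return err;
        /* mandatory: flush TLBs and free any fully reverted page tables */
        global_flush_tlb();
        return 0;
}

static void example_restore(struct page *page)
{
        change_page_attr(page, 1, PAGE_KERNEL);
        global_flush_tlb();
}
#endif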
void global_flush_tlb(void)
{
        struct page *dpage;

        down_read(&init_mm.mmap_sem);
        dpage = xchg(&deferred_pages, NULL);
        up_read(&init_mm.mmap_sem);

        flush_map((dpage && !dpage->lru.next) ? (unsigned long)page_address(dpage) : 0);
        while (dpage) {
                struct page *tmp = dpage;
                dpage = (struct page *)dpage->lru.next;
                ClearPagePrivate(tmp);
                __free_page(tmp);
        }
}
EXPORT_SYMBOL(change_page_attr);
EXPORT_SYMBOL(global_flush_tlb);