/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
        return addr >= start && addr < end;
}
void clflush_cache_range(void *addr, int size)
{
        int i;

        /* Flush one cacheline at a time over the whole range: */
        for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size)
                clflush(addr + i);
}
static void flush_kernel_map(void *arg)
{
        /*
         * Flush everything to work around an erratum in early Athlons
         * regarding large page flushing.
         */
        __flush_tlb_all();

        if (boot_cpu_data.x86_model >= 4)
                wbinvd();
}
static void global_flush_tlb(void)
{
        BUG_ON(irqs_disabled());

        on_each_cpu(flush_kernel_map, NULL, 1, 1);
}
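/*
 * The set_memory_*() and set_pages_*() entry points below call
 * global_flush_tlb() after updating the page tables, so flush_kernel_map()
 * has run on every CPU before their callers rely on the new attributes.
 */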
/*
 * Certain areas of memory on x86 require very specific protection flags,
 * for example the BIOS area or kernel text. Callers don't always get this
 * right (again, ioremap() on BIOS memory is not uncommon) so this function
 * checks and fixes these known static required protection bits.
 */
static inline pgprot_t static_protections(pgprot_t prot, unsigned long address)
{
        pgprot_t forbidden = __pgprot(0);

        /*
         * The BIOS area between 640k and 1Mb needs to be executable for
         * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
         */
        if (within(__pa(address), BIOS_BEGIN, BIOS_END))
                pgprot_val(forbidden) |= _PAGE_NX;

        /*
         * The kernel text needs to be executable for obvious reasons.
         * This does not cover __inittext, since that is gone later on.
         */
        if (within(address, (unsigned long)_text, (unsigned long)_etext))
                pgprot_val(forbidden) |= _PAGE_NX;

#ifdef CONFIG_DEBUG_RODATA
        /* The .rodata section needs to be read-only: */
        if (within(address, (unsigned long)__start_rodata,
                   (unsigned long)__end_rodata))
                pgprot_val(forbidden) |= _PAGE_RW;
#endif

        prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));

        return prot;
}
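/*
 * Example of the effect of static_protections(): a request to make a page of
 * kernel text non-executable (e.g. set_memory_nx() over a range that
 * includes _text.._etext) has _PAGE_NX masked out of the requested
 * protections here, so kernel text stays executable.
 */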
pte_t *lookup_address(unsigned long address, int *level)
{
        pgd_t *pgd = pgd_offset_k(address);
        pud_t *pud;
        pmd_t *pmd;

        *level = PG_LEVEL_NONE;

        if (pgd_none(*pgd))
                return NULL;
        pud = pud_offset(pgd, address);
        if (pud_none(*pud))
                return NULL;
        pmd = pmd_offset(pud, address);
        if (pmd_none(*pmd))
                return NULL;

        *level = PG_LEVEL_2M;
        if (pmd_large(*pmd))
                return (pte_t *)pmd;

        *level = PG_LEVEL_4K;
        return pte_offset_kernel(pmd, address);
}
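/*
 * Callers of lookup_address() must check *level before using the result:
 * for PG_LEVEL_2M the returned pointer is really the pmd entry that maps
 * the large page, not a 4k pte. __change_page_attr() below only writes
 * through it directly once the level is PG_LEVEL_4K.
 */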
static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
        /* change init_mm: */
        set_pte_atomic(kpte, pte);

#ifdef CONFIG_X86_32
        /* change all the other page tables that do not share kernel pmds: */
        if (!SHARED_KERNEL_PMD) {
                struct page *page;

                for (page = pgd_list; page; page = (struct page *)page->index) {
                        pgd_t *pgd;
                        pud_t *pud;
                        pmd_t *pmd;

                        pgd = (pgd_t *)page_address(page) + pgd_index(address);
                        pud = pud_offset(pgd, address);
                        pmd = pmd_offset(pud, address);
                        set_pte_atomic((pte_t *)pmd, pte);
                }
        }
#endif
}
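/*
 * The pgd_list walk above is needed when kernel pmds are not shared between
 * page tables (!SHARED_KERNEL_PMD, e.g. some 32-bit PAE configurations):
 * each pgd then carries its own copy of the kernel pmd entries, so a change
 * made in init_mm has to be replayed into every pgd on the list.
 */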
static int split_large_page(pte_t *kpte, unsigned long address)
{
        pgprot_t ref_prot = pte_pgprot(pte_clrhuge(*kpte));
        gfp_t gfp_flags = GFP_KERNEL;
        unsigned long flags;
        unsigned long addr;
        pte_t *pbase, *tmp;
        struct page *base;
        unsigned int i, level;

#ifdef CONFIG_DEBUG_PAGEALLOC
        gfp_flags = GFP_ATOMIC;
#endif
        base = alloc_pages(gfp_flags, 0);
        if (!base)
                return -ENOMEM;

        spin_lock_irqsave(&pgd_lock, flags);
        /*
         * Check for races, another CPU might have split this page
         * up for us already:
         */
        tmp = lookup_address(address, &level);
        if (tmp != kpte)
                goto out_unlock;

        address = __pa(address);
        addr = address & LARGE_PAGE_MASK;
        pbase = (pte_t *)page_address(base);
#ifdef CONFIG_X86_32
        paravirt_alloc_pt(&init_mm, page_to_pfn(base));
#endif

        for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE)
                set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT, ref_prot));

        /*
         * Install the new, split up pagetable. Important detail here:
         *
         * On Intel the NX bit of all levels must be cleared to make a
         * page executable (see section 4.13.2 of the Intel 64 and IA-32
         * Architectures Software Developer's Manual).
         */
        ref_prot = pte_pgprot(pte_mkexec(pte_clrhuge(*kpte)));
        __set_pmd_pte(kpte, address, mk_pte(base, ref_prot));
        base = NULL;

out_unlock:
        spin_unlock_irqrestore(&pgd_lock, flags);

        if (base)
                __free_pages(base, 0);

        return 0;
}
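/*
 * Note that split_large_page() is attribute-neutral: all PTRS_PER_PTE new
 * 4k entries inherit ref_prot from the original large page. The actual
 * attribute change for the one target page happens afterwards, when
 * __change_page_attr() retries and takes the PG_LEVEL_4K branch.
 */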
static int
__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot)
{
        struct page *kpte_page;
        int level, err = 0;
        pte_t *kpte;

        BUG_ON(pfn > max_low_pfn);

repeat:
        kpte = lookup_address(address, &level);
        if (!kpte)
                return -EINVAL;

        kpte_page = virt_to_page(kpte);
        BUG_ON(PageLRU(kpte_page));
        BUG_ON(PageCompound(kpte_page));

        prot = static_protections(prot, address);

        if (level == PG_LEVEL_4K) {
                WARN_ON_ONCE(pgprot_val(prot) & _PAGE_PSE);
                set_pte_atomic(kpte, pfn_pte(pfn, canon_pgprot(prot)));
        } else {
                /* Clear the PSE bit for the 4k level pages: */
                pgprot_val(prot) = pgprot_val(prot) & ~_PAGE_PSE;

                err = split_large_page(kpte, address);
                if (!err)
                        goto repeat;
        }
        return err;
}
/**
 * change_page_attr_addr - Change page table attributes in linear mapping
 * @address: Virtual address in linear mapping.
 * @prot: New page table attribute (PAGE_*)
 *
 * Change page attributes of a page in the direct mapping. This is a variant
 * of change_page_attr() that also works on memory holes that do not have a
 * mem_map entry (pfn_valid() is false).
 *
 * See change_page_attr() documentation for more details.
 *
 * Modules and drivers should use the set_memory_* APIs instead.
 */
static int change_page_attr_addr(unsigned long address, pgprot_t prot)
{
        int err = 0, kernel_map = 0;
        unsigned long pfn = __pa(address) >> PAGE_SHIFT;

#ifdef CONFIG_X86_64
        if (address >= __START_KERNEL_map &&
            address < __START_KERNEL_map + KERNEL_TEXT_SIZE) {

                address = (unsigned long)__va(__pa(address));
                kernel_map = 1;
        }
#endif

        if (!kernel_map || pte_present(pfn_pte(0, prot))) {
                err = __change_page_attr(address, pfn, prot);
                if (err)
                        return err;
        }

#ifdef CONFIG_X86_64
        /*
         * Handle the kernel mapping too, which aliases part of
         * lowmem:
         */
        if (__pa(address) < KERNEL_TEXT_SIZE) {
                unsigned long addr2;
                pgprot_t prot2;

                addr2 = __START_KERNEL_map + __pa(address);
                /* Make sure the kernel mappings stay executable: */
                prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
                err = __change_page_attr(addr2, pfn, prot2);
        }
#endif

        return err;
}
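/*
 * On 64-bit, addresses inside the kernel image mapping (__START_KERNEL_map)
 * are first redirected to their direct-mapping alias. The image mapping is
 * then updated as well, but with the executable bit forced on, so changing
 * attributes of lowmem pages can never make kernel text non-executable
 * through that alias.
 */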
/**
 * change_page_attr_set - Change page table attributes in the linear mapping.
 * @addr: Virtual address in linear mapping.
 * @numpages: Number of pages to change
 * @prot: Protection/caching type bits to set (PAGE_*)
 *
 * Returns 0 on success, otherwise a negative errno value.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes of the
 * kernel linear mapping too.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere
 * (e.g. in user space). This function only deals with the kernel linear map.
 *
 * This function is different from change_page_attr() in that only selected
 * bits are impacted; all other bits remain as they are.
 */
static int change_page_attr_set(unsigned long addr, int numpages,
                                pgprot_t prot)
{
        pgprot_t current_prot, new_prot;
        int level, ret, i;
        pte_t *pte;

        for (i = 0; i < numpages; i++) {
                pte = lookup_address(addr, &level);
                if (!pte)
                        return -EINVAL;

                current_prot = pte_pgprot(*pte);

                pgprot_val(new_prot) =
                        pgprot_val(current_prot) | pgprot_val(prot);

                ret = change_page_attr_addr(addr, new_prot);
                if (ret)
                        return ret;
                addr += PAGE_SIZE;
        }

        return 0;
}
/**
 * change_page_attr_clear - Change page table attributes in the linear mapping.
 * @addr: Virtual address in linear mapping.
 * @numpages: Number of pages to change
 * @prot: Protection/caching type bits to clear (PAGE_*)
 *
 * Returns 0 on success, otherwise a negative errno value.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes of the
 * kernel linear mapping too.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere
 * (e.g. in user space). This function only deals with the kernel linear map.
 *
 * This function is different from change_page_attr() in that only selected
 * bits are impacted; all other bits remain as they are.
 */
static int change_page_attr_clear(unsigned long addr, int numpages,
                                  pgprot_t prot)
{
        pgprot_t current_prot, new_prot;
        int level, ret, i;
        pte_t *pte;

        for (i = 0; i < numpages; i++) {
                pte = lookup_address(addr, &level);
                if (!pte)
                        return -EINVAL;

                current_prot = pte_pgprot(*pte);

                pgprot_val(new_prot) =
                        pgprot_val(current_prot) & ~pgprot_val(prot);

                ret = change_page_attr_addr(addr, new_prot);
                if (ret)
                        return ret;
                addr += PAGE_SIZE;
        }

        return 0;
}
int set_memory_uc(unsigned long addr, int numpages)
{
        int err;

        err = change_page_attr_set(addr, numpages,
                                   __pgprot(_PAGE_PCD | _PAGE_PWT));
        global_flush_tlb();

        return err;
}
EXPORT_SYMBOL(set_memory_uc);

int set_memory_wb(unsigned long addr, int numpages)
{
        int err;

        err = change_page_attr_clear(addr, numpages,
                                     __pgprot(_PAGE_PCD | _PAGE_PWT));
        global_flush_tlb();

        return err;
}
EXPORT_SYMBOL(set_memory_wb);
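/*
 * Illustrative use of the set/clear pair (a hedged sketch, not code from
 * this file): a driver that touches device-owned pages through the kernel
 * linear map and needs them uncached might do something like
 *
 *	err = set_memory_uc(addr, numpages);	 sets _PAGE_PCD | _PAGE_PWT
 *	...
 *	err = set_memory_wb(addr, numpages);	 clears them again
 *
 * where addr is a linear-map address (page_address()/__va()), not an
 * ioremap() cookie.
 */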
int set_memory_x(unsigned long addr, int numpages)
{
        int err;

        err = change_page_attr_clear(addr, numpages,
                                     __pgprot(_PAGE_NX));
        global_flush_tlb();

        return err;
}
EXPORT_SYMBOL(set_memory_x);

int set_memory_nx(unsigned long addr, int numpages)
{
        int err;

        err = change_page_attr_set(addr, numpages,
                                   __pgprot(_PAGE_NX));
        global_flush_tlb();

        return err;
}
EXPORT_SYMBOL(set_memory_nx);

int set_memory_ro(unsigned long addr, int numpages)
{
        int err;

        err = change_page_attr_clear(addr, numpages,
                                     __pgprot(_PAGE_RW));
        global_flush_tlb();

        return err;
}

int set_memory_rw(unsigned long addr, int numpages)
{
        int err;

        err = change_page_attr_set(addr, numpages,
                                   __pgprot(_PAGE_RW));
        global_flush_tlb();

        return err;
}

int set_memory_np(unsigned long addr, int numpages)
{
        int err;

        err = change_page_attr_clear(addr, numpages,
                                     __pgprot(_PAGE_PRESENT));
        global_flush_tlb();

        return err;
}
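/*
 * The set_pages_*() variants below are thin wrappers around the
 * corresponding set_memory_*() functions: they only translate a struct page
 * into its kernel linear-map address via page_address() and pass the call
 * through.
 */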
int set_pages_uc(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_uc(addr, numpages);
}
EXPORT_SYMBOL(set_pages_uc);

int set_pages_wb(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_wb(addr, numpages);
}
EXPORT_SYMBOL(set_pages_wb);

int set_pages_x(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_x(addr, numpages);
}
EXPORT_SYMBOL(set_pages_x);

int set_pages_nx(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_nx(addr, numpages);
}
EXPORT_SYMBOL(set_pages_nx);

int set_pages_ro(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_ro(addr, numpages);
}

int set_pages_rw(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_rw(addr, numpages);
}
#ifdef CONFIG_DEBUG_PAGEALLOC

static int __set_pages_p(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return change_page_attr_set(addr, numpages,
                                    __pgprot(_PAGE_PRESENT | _PAGE_RW));
}

static int __set_pages_np(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_PRESENT));
}

void kernel_map_pages(struct page *page, int numpages, int enable)
{
        if (PageHighMem(page))
                return;

        if (!enable)
                debug_check_no_locks_freed(page_address(page),
                                           numpages * PAGE_SIZE);

        /*
         * If the page allocator is not up yet then do not call c_p_a():
         */
        if (!debug_pagealloc_enabled)
                return;

        /*
         * The return value is ignored - the calls cannot fail,
         * large pages are disabled at boot time:
         */
        if (enable)
                __set_pages_p(page, numpages);
        else
                __set_pages_np(page, numpages);

        /*
         * We should perform an IPI and flush all tlbs,
         * but that can deadlock -> flush only the current cpu:
         */
        __flush_tlb_all();
}
#endif
/*
 * The testcases use internal knowledge of the implementation that shouldn't
 * be exposed to the rest of the kernel. Include these directly here.
 */
#ifdef CONFIG_CPA_DEBUG
#include "pageattr-test.c"
#endif