x86: cpa: clean up change_page_attr_set/clear()
arch/x86/mm/pageattr.c

/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/e820.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>

static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
	return addr >= start && addr < end;
}

/*
 * Flushing functions
 */
void clflush_cache_range(void *addr, int size)
{
	int i;

	for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size)
		clflush(addr+i);
}

static void flush_kernel_map(void *arg)
{
	/*
	 * Flush all to work around errata in early Athlons regarding
	 * large page flushing.
	 */
	__flush_tlb_all();

	if (boot_cpu_data.x86_model >= 4)
		wbinvd();
}

static void global_flush_tlb(void)
{
	BUG_ON(irqs_disabled());

	on_each_cpu(flush_kernel_map, NULL, 1, 1);
}

/*
 * Certain areas of memory on x86 require very specific protection flags,
 * for example the BIOS area or kernel text. Callers don't always get this
 * right (again, ioremap() on BIOS memory is not uncommon) so this function
 * checks and fixes these known static required protection bits.
 */
static inline pgprot_t static_protections(pgprot_t prot, unsigned long address)
{
	pgprot_t forbidden = __pgprot(0);

	/*
	 * The BIOS area between 640k and 1Mb needs to be executable for
	 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
	 */
	if (within(__pa(address), BIOS_BEGIN, BIOS_END))
		pgprot_val(forbidden) |= _PAGE_NX;

	/*
	 * The kernel text needs to be executable for obvious reasons.
	 * This does not cover __inittext, since that is gone later on.
	 */
	if (within(address, (unsigned long)_text, (unsigned long)_etext))
		pgprot_val(forbidden) |= _PAGE_NX;

#ifdef CONFIG_DEBUG_RODATA
	/* The .rodata section needs to be read-only */
	if (within(address, (unsigned long)__start_rodata,
		   (unsigned long)__end_rodata))
		pgprot_val(forbidden) |= _PAGE_RW;
#endif

	prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));

	return prot;
}
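
/*
 * Illustrative sketch (not part of the original file): what the fixups
 * above do to a caller's request. The concrete prot value used here is
 * an assumption for demonstration only.
 */
#if 0
static void static_protections_example(void)
{
	pgprot_t req = __pgprot(pgprot_val(PAGE_KERNEL) | _PAGE_NX);

	/*
	 * For an address inside the kernel text, _PAGE_NX lands in the
	 * forbidden mask, so it is filtered out of the request and the
	 * text mapping stays executable:
	 */
	req = static_protections(req, (unsigned long)_text);
	/* pgprot_val(req) now has _PAGE_NX cleared. */
}
#endif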

pte_t *lookup_address(unsigned long address, int *level)
{
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;

	*level = PG_LEVEL_NONE;

	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;

	*level = PG_LEVEL_2M;
	if (pmd_large(*pmd))
		return (pte_t *)pmd;

	*level = PG_LEVEL_4K;
	return pte_offset_kernel(pmd, address);
}
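
/*
 * Illustrative sketch (assumption, not in the original file): a typical
 * caller checks the returned level before using the entry, because for
 * PG_LEVEL_2M the returned pointer is really the pmd entry of the large
 * page, not a 4k pte:
 */
#if 0
static int lookup_example(unsigned long address)
{
	int level;
	pte_t *pte = lookup_address(address, &level);

	if (!pte)
		return -EINVAL;	/* nothing mapped at this address */
	if (level == PG_LEVEL_2M)
		return 1;	/* covered by a 2MB large page */
	return 0;		/* ordinary 4k mapping */
}
#endif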

static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
	/* change init_mm */
	set_pte_atomic(kpte, pte);
#ifdef CONFIG_X86_32
	if (!SHARED_KERNEL_PMD) {
		struct page *page;

		for (page = pgd_list; page; page = (struct page *)page->index) {
			pgd_t *pgd;
			pud_t *pud;
			pmd_t *pmd;

			pgd = (pgd_t *)page_address(page) + pgd_index(address);
			pud = pud_offset(pgd, address);
			pmd = pmd_offset(pud, address);
			set_pte_atomic((pte_t *)pmd, pte);
		}
	}
#endif
}

static int split_large_page(pte_t *kpte, unsigned long address)
{
	pgprot_t ref_prot = pte_pgprot(pte_clrhuge(*kpte));
	gfp_t gfp_flags = GFP_KERNEL;
	unsigned long flags;
	unsigned long addr;
	pte_t *pbase, *tmp;
	struct page *base;
	int i, level;

#ifdef CONFIG_DEBUG_PAGEALLOC
	gfp_flags = GFP_ATOMIC;
#endif
	base = alloc_pages(gfp_flags, 0);
	if (!base)
		return -ENOMEM;

	spin_lock_irqsave(&pgd_lock, flags);
	/*
	 * Check for races, another CPU might have split this page
	 * up for us already:
	 */
	tmp = lookup_address(address, &level);
	if (tmp != kpte) {
		WARN_ON_ONCE(1);
		goto out_unlock;
	}

	address = __pa(address);
	addr = address & LARGE_PAGE_MASK;
	pbase = (pte_t *)page_address(base);
#ifdef CONFIG_X86_32
	paravirt_alloc_pt(&init_mm, page_to_pfn(base));
#endif

	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE)
		set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT, ref_prot));

	/*
	 * Install the new, split up pagetable. Important detail here:
	 *
	 * On Intel the NX bit of all levels must be cleared to make a
	 * page executable. See section 4.13.2 of the Intel 64 and IA-32
	 * Architectures Software Developer's Manual.
	 */
	ref_prot = pte_pgprot(pte_mkexec(pte_clrhuge(*kpte)));
	__set_pmd_pte(kpte, address, mk_pte(base, ref_prot));
	base = NULL;

out_unlock:
	spin_unlock_irqrestore(&pgd_lock, flags);

	if (base)
		__free_pages(base, 0);

	return 0;
}
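
/*
 * Worked example (illustrative, with assumed numbers): splitting the 2MB
 * page that covers physical address 0x1234567. The loop above fills all
 * PTRS_PER_PTE entries (512 on 64-bit) of the new page table:
 *
 *	addr = 0x1234567 & LARGE_PAGE_MASK = 0x1200000
 *	pbase[0]   maps 0x1200000
 *	pbase[1]   maps 0x1201000
 *	...
 *	pbase[511] maps 0x13ff000
 *
 * The split therefore covers exactly the same physical range, just at 4k
 * granularity, initially with the old large page's protections.
 */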

static int
__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot)
{
	struct page *kpte_page;
	int level, err = 0;
	pte_t *kpte;

#ifdef CONFIG_X86_32
	BUG_ON(pfn > max_low_pfn);
#endif

repeat:
	kpte = lookup_address(address, &level);
	if (!kpte)
		return -EINVAL;

	kpte_page = virt_to_page(kpte);
	BUG_ON(PageLRU(kpte_page));
	BUG_ON(PageCompound(kpte_page));

	prot = static_protections(prot, address);

	if (level == PG_LEVEL_4K) {
		WARN_ON_ONCE(pgprot_val(prot) & _PAGE_PSE);
		set_pte_atomic(kpte, pfn_pte(pfn, canon_pgprot(prot)));
	} else {
		/* Clear the PSE bit for the 4k level pages! */
		pgprot_val(prot) = pgprot_val(prot) & ~_PAGE_PSE;

		err = split_large_page(kpte, address);
		if (!err)
			goto repeat;
	}
	return err;
}

/**
 * change_page_attr_addr - Change page table attributes in linear mapping
 * @address: Virtual address in linear mapping.
 * @prot: New page table attribute (PAGE_*)
 *
 * Change page attributes of a page in the direct mapping. This is a variant
 * of change_page_attr() that also works on memory holes that do not have a
 * mem_map entry (pfn_valid() is false).
 *
 * See change_page_attr() documentation for more details.
 *
 * Modules and drivers should use the set_memory_* APIs instead.
 */
static int change_page_attr_addr(unsigned long address, pgprot_t prot)
{
	int err = 0, kernel_map = 0;
	unsigned long pfn = __pa(address) >> PAGE_SHIFT;

#ifdef CONFIG_X86_64
	if (address >= __START_KERNEL_map &&
			address < __START_KERNEL_map + KERNEL_TEXT_SIZE) {

		address = (unsigned long)__va(__pa(address));
		kernel_map = 1;
	}
#endif

	if (!kernel_map || pte_present(pfn_pte(0, prot))) {
		err = __change_page_attr(address, pfn, prot);
		if (err)
			return err;
	}

#ifdef CONFIG_X86_64
	/*
	 * Handle the kernel mapping too, which aliases part of
	 * lowmem:
	 */
	if (__pa(address) < KERNEL_TEXT_SIZE) {
		unsigned long addr2;
		pgprot_t prot2;

		addr2 = __START_KERNEL_map + __pa(address);
		/* Make sure the kernel mappings stay executable */
		prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
		err = __change_page_attr(addr2, pfn, prot2);
	}
#endif

	return err;
}
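
/*
 * Illustrative note (assumed example address, not in the original file):
 * on 64-bit, low physical memory is mapped twice - once in the direct
 * mapping and once in the kernel image mapping. For physical address
 * 0x100000 the two aliases are:
 *
 *	direct mapping:		__va(0x100000)
 *	kernel image mapping:	__START_KERNEL_map + 0x100000
 *
 * The function above updates both, so the aliases never end up with
 * conflicting caching attributes, while pte_mkexec() keeps the kernel
 * image alias executable.
 */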

/**
 * change_page_attr_set - Change page table attributes in the linear mapping.
 * @addr: Virtual address in linear mapping.
 * @numpages: Number of pages to change
 * @prot: Protection/caching type bits to set (PAGE_*)
 *
 * Returns 0 on success, otherwise a negated errno.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes of the
 * kernel linear mapping too.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere
 * (e.g. in user space). This function only deals with the kernel linear map.
 *
 * This function differs from change_page_attr() in that only the selected
 * bits are impacted; all other bits remain as they are.
 */
static int change_page_attr_set(unsigned long addr, int numpages,
				pgprot_t prot)
{
	pgprot_t current_prot, new_prot;
	int level;
	pte_t *pte;
	int i, ret;

	for (i = 0; i < numpages; i++) {

		pte = lookup_address(addr, &level);
		if (!pte)
			return -EINVAL;

		current_prot = pte_pgprot(*pte);

		pgprot_val(new_prot) =
			pgprot_val(current_prot) | pgprot_val(prot);

		ret = change_page_attr_addr(addr, new_prot);
		if (ret)
			return ret;
		addr += PAGE_SIZE;
	}

	return 0;
}

/**
 * change_page_attr_clear - Change page table attributes in the linear mapping.
 * @addr: Virtual address in linear mapping.
 * @numpages: Number of pages to change
 * @prot: Protection/caching type bits to clear (PAGE_*)
 *
 * Returns 0 on success, otherwise a negated errno.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes of the
 * kernel linear mapping too.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere
 * (e.g. in user space). This function only deals with the kernel linear map.
 *
 * This function differs from change_page_attr() in that only the selected
 * bits are impacted; all other bits remain as they are.
 */
static int change_page_attr_clear(unsigned long addr, int numpages,
				  pgprot_t prot)
{
	pgprot_t current_prot, new_prot;
	int level;
	pte_t *pte;
	int i, ret;

	for (i = 0; i < numpages; i++) {

		pte = lookup_address(addr, &level);
		if (!pte)
			return -EINVAL;

		current_prot = pte_pgprot(*pte);

		pgprot_val(new_prot) =
			pgprot_val(current_prot) & ~pgprot_val(prot);

		ret = change_page_attr_addr(addr, new_prot);
		if (ret)
			return ret;
		addr += PAGE_SIZE;
	}

	return 0;
}
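
/*
 * Worked example (illustrative, assumed starting bits): the set/clear
 * split only touches the requested bits, everything else survives:
 *
 *	current pte bits: _PAGE_PRESENT | _PAGE_RW | _PAGE_NX
 *
 *	change_page_attr_set(addr, 1, __pgprot(_PAGE_PCD)) yields
 *		_PAGE_PRESENT | _PAGE_RW | _PAGE_NX | _PAGE_PCD
 *
 *	change_page_attr_clear(addr, 1, __pgprot(_PAGE_RW)) then yields
 *		_PAGE_PRESENT | _PAGE_NX | _PAGE_PCD
 */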

int set_memory_uc(unsigned long addr, int numpages)
{
	int err;

	err = change_page_attr_set(addr, numpages,
				   __pgprot(_PAGE_PCD | _PAGE_PWT));
	global_flush_tlb();
	return err;
}
EXPORT_SYMBOL(set_memory_uc);
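
/*
 * Illustrative usage sketch (assumption, not part of the original file):
 * a driver that needs an uncacheable buffer in the linear mapping would
 * pair set_memory_uc() with set_memory_wb() before freeing the pages.
 * The function name and the order-2 allocation are hypothetical.
 */
#if 0
static int example_alloc_uncached(void)
{
	unsigned long addr = __get_free_pages(GFP_KERNEL, 2); /* 4 pages */
	int err;

	if (!addr)
		return -ENOMEM;

	err = set_memory_uc(addr, 4);	/* mark all 4 pages uncacheable */
	if (err) {
		free_pages(addr, 2);
		return err;
	}

	/* ... let the device see consistent, uncached data here ... */

	set_memory_wb(addr, 4);		/* restore write-back before freeing */
	free_pages(addr, 2);
	return 0;
}
#endif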

int set_memory_wb(unsigned long addr, int numpages)
{
	int err;

	err = change_page_attr_clear(addr, numpages,
				     __pgprot(_PAGE_PCD | _PAGE_PWT));
	global_flush_tlb();
	return err;
}
EXPORT_SYMBOL(set_memory_wb);

int set_memory_x(unsigned long addr, int numpages)
{
	int err;

	err = change_page_attr_clear(addr, numpages,
				     __pgprot(_PAGE_NX));
	global_flush_tlb();
	return err;
}
EXPORT_SYMBOL(set_memory_x);

int set_memory_nx(unsigned long addr, int numpages)
{
	int err;

	err = change_page_attr_set(addr, numpages,
				   __pgprot(_PAGE_NX));
	global_flush_tlb();
	return err;
}
EXPORT_SYMBOL(set_memory_nx);

int set_memory_ro(unsigned long addr, int numpages)
{
	int err;

	err = change_page_attr_clear(addr, numpages,
				     __pgprot(_PAGE_RW));
	global_flush_tlb();
	return err;
}

int set_memory_rw(unsigned long addr, int numpages)
{
	int err;

	err = change_page_attr_set(addr, numpages,
				   __pgprot(_PAGE_RW));
	global_flush_tlb();
	return err;
}

int set_memory_np(unsigned long addr, int numpages)
{
	int err;

	err = change_page_attr_clear(addr, numpages,
				     __pgprot(_PAGE_PRESENT));
	global_flush_tlb();
	return err;
}

int set_pages_uc(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_uc(addr, numpages);
}
EXPORT_SYMBOL(set_pages_uc);

int set_pages_wb(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_wb(addr, numpages);
}
EXPORT_SYMBOL(set_pages_wb);

int set_pages_x(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_x(addr, numpages);
}
EXPORT_SYMBOL(set_pages_x);

int set_pages_nx(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_nx(addr, numpages);
}
EXPORT_SYMBOL(set_pages_nx);

int set_pages_ro(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_ro(addr, numpages);
}

int set_pages_rw(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_rw(addr, numpages);
}

#ifdef CONFIG_DEBUG_PAGEALLOC

static int __set_pages_p(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);
	return change_page_attr_set(addr, numpages,
				    __pgprot(_PAGE_PRESENT | _PAGE_RW));
}

static int __set_pages_np(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);
	return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_PRESENT));
}

void kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (PageHighMem(page))
		return;
	if (!enable) {
		debug_check_no_locks_freed(page_address(page),
					   numpages * PAGE_SIZE);
	}

	/*
	 * If the page allocator is not up yet, do not call c_p_a():
	 */
	if (!debug_pagealloc_enabled)
		return;

	/*
	 * The return value is ignored - the calls cannot fail,
	 * large pages are disabled at boot time:
	 */
	if (enable)
		__set_pages_p(page, numpages);
	else
		__set_pages_np(page, numpages);

	/*
	 * We should perform an IPI and flush all tlbs,
	 * but that can deadlock -> flush only the current cpu:
	 */
	__flush_tlb_all();
}
#endif
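
/*
 * Background note (illustrative, not in the original file): with
 * CONFIG_DEBUG_PAGEALLOC the page allocator calls
 * kernel_map_pages(page, 1 << order, 0) when pages are freed and
 * kernel_map_pages(page, 1 << order, 1) when they are allocated again,
 * so a use-after-free through the linear mapping faults immediately
 * instead of silently corrupting memory.
 */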

/*
 * The testcases use internal knowledge of the implementation that shouldn't
 * be exposed to the rest of the kernel. Include these directly here.
 */
#ifdef CONFIG_CPA_DEBUG
#include "pageattr-test.c"
#endif