x86: prepare for the unification of the cpa code
[wrt350n-kernel.git] / arch/x86/mm/pageattr_32.c
blob 1c7bd81a41945750a7da6bd5476665ee50298f6d
/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
pte_t *lookup_address(unsigned long address, int *level)
{
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;
	*level = 3;
	if (pmd_large(*pmd))
		return (pte_t *)pmd;
	*level = 4;

	return pte_offset_kernel(pmd, address);
}
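
/*
 * Example (illustrative, not part of the original file): probing how
 * an address is mapped. In this version of the code *level is 3 for
 * a large (2M/4M) pmd mapping - in which case the returned pointer is
 * really the pmd entry - and 4 for a regular 4K pte:
 *
 *	int level;
 *	pte_t *pte = lookup_address(address, &level);
 *
 *	if (pte && level == 3)
 *		printk("mapped by a large page\n");
 */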

static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
	struct page *page;

	/* change init_mm */
	set_pte_atomic(kpte, pte);
	if (SHARED_KERNEL_PMD)
		return;
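
	/*
	 * The kernel pmd is not shared between pagetables here, so the
	 * update done to init_mm above must be replicated into every
	 * pgd on the pgd_list:
	 */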
	for (page = pgd_list; page; page = (struct page *)page->index) {
		pgd_t *pgd;
		pud_t *pud;
		pmd_t *pmd;

		pgd = (pgd_t *)page_address(page) + pgd_index(address);
		pud = pud_offset(pgd, address);
		pmd = pmd_offset(pud, address);
		set_pte_atomic((pte_t *)pmd, pte);
	}
}

static int split_large_page(pte_t *kpte, unsigned long address)
{
	pgprot_t ref_prot = pte_pgprot(pte_clrhuge(*kpte));
	gfp_t gfp_flags = GFP_KERNEL;
	unsigned long flags;
	unsigned long addr;
	pte_t *pbase, *tmp;
	struct page *base;
	int i, level;

#ifdef CONFIG_DEBUG_PAGEALLOC
	gfp_flags = GFP_ATOMIC;
#endif
	base = alloc_pages(gfp_flags, 0);
	if (!base)
		return -ENOMEM;

	spin_lock_irqsave(&pgd_lock, flags);
	/*
	 * Check for races, another CPU might have split this page
	 * up for us already:
	 */
	tmp = lookup_address(address, &level);
	if (tmp != kpte) {
		WARN_ON_ONCE(1);
		goto out_unlock;
	}

	address = __pa(address);
	addr = address & LARGE_PAGE_MASK;
	pbase = (pte_t *)page_address(base);
	paravirt_alloc_pt(&init_mm, page_to_pfn(base));
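
	/*
	 * Populate the new pte page with 4K entries covering the same
	 * physical range, using the large page's (non-PSE) protections:
	 */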
	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE)
		set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT, ref_prot));

	/*
	 * Install the new, split up pagetable:
	 */
	__set_pmd_pte(kpte, address, mk_pte(base, ref_prot));
	base = NULL;

out_unlock:
	spin_unlock_irqrestore(&pgd_lock, flags);

	if (base)
		__free_pages(base, 0);

	return 0;
}

static int __change_page_attr(struct page *page, pgprot_t prot)
{
	struct page *kpte_page;
	unsigned long address;
	int level, err = 0;
	pte_t *kpte;

	BUG_ON(PageHighMem(page));
	address = (unsigned long)page_address(page);

repeat:
	kpte = lookup_address(address, &level);
	if (!kpte)
		return -EINVAL;

	kpte_page = virt_to_page(kpte);
	BUG_ON(PageLRU(kpte_page));
	BUG_ON(PageCompound(kpte_page));

	/*
	 * Better fail early if someone sets the kernel text to NX.
	 * Does not cover __inittext.
	 */
	BUG_ON(address >= (unsigned long)&_text &&
		address < (unsigned long)&_etext &&
		(pgprot_val(prot) & _PAGE_NX));
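
	/*
	 * A 4K pte (level 4) can be updated in place; a large page
	 * mapping (level 3) must first be split, then we retry:
	 */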
	if (level == 4) {
		set_pte_atomic(kpte, mk_pte(page, canon_pgprot(prot)));
	} else {
		err = split_large_page(kpte, address);
		if (!err)
			goto repeat;
	}
	return err;
}

/*
 * Change the page attributes of a page in the linear mapping.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes of the
 * kernel linear mapping too.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere.
 * This function only deals with the kernel linear map.
 *
 * Caller must call global_flush_tlb() after this.
 */
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
	int err = 0, i;

	for (i = 0; i < numpages; i++, page++) {
		err = __change_page_attr(page, prot);
		if (err)
			break;
	}

	return err;
}
EXPORT_SYMBOL(change_page_attr);
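
/*
 * Example (illustrative, not part of the original file): a typical
 * caller changes the caching attribute of some pages, does its work,
 * then restores PAGE_KERNEL - flushing after each change:
 *
 *	if (!change_page_attr(page, 1, PAGE_KERNEL_NOCACHE))
 *		global_flush_tlb();
 *	...
 *	change_page_attr(page, 1, PAGE_KERNEL);
 *	global_flush_tlb();
 */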

int change_page_attr_addr(unsigned long addr, int numpages, pgprot_t prot)
{
	int i;
	unsigned long pfn = (__pa(addr) >> PAGE_SHIFT);

	for (i = 0; i < numpages; i++) {
		if (!pfn_valid(pfn + i)) {
			WARN_ON_ONCE(1);
			break;
		} else {
			int level;
			pte_t *pte = lookup_address(addr + i*PAGE_SIZE, &level);
			BUG_ON(pte && pte_none(*pte));
		}
	}

	return change_page_attr(virt_to_page(addr), i, prot);
}

static void flush_kernel_map(void *arg)
{
	/*
	 * Flush all to work around Errata in early athlons regarding
	 * large page flushing.
	 */
	__flush_tlb_all();

	if (boot_cpu_data.x86_model >= 4)
		wbinvd();
}

void global_flush_tlb(void)
{
	BUG_ON(irqs_disabled());
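
	/*
	 * on_each_cpu() sends IPIs to the other cpus; doing that with
	 * interrupts disabled could deadlock, hence the check above.
	 */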
	on_each_cpu(flush_kernel_map, NULL, 1, 1);
}
EXPORT_SYMBOL(global_flush_tlb);

#ifdef CONFIG_DEBUG_PAGEALLOC
void kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (PageHighMem(page))
		return;
	if (!enable) {
		debug_check_no_locks_freed(page_address(page),
					   numpages * PAGE_SIZE);
	}

	/*
	 * If the page allocator is not up yet then do not call c_p_a():
	 */
	if (!debug_pagealloc_enabled)
		return;

	/*
	 * The return value is ignored - the calls cannot fail,
	 * large pages are disabled at boot time:
	 */
	change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));

	/*
	 * We should perform an IPI and flush all tlbs,
	 * but that can deadlock -> flush only the current cpu:
	 */
	__flush_tlb_all();
}
#endif