x86: add kernel_map_pages() to 64-bit
[wrt350n-kernel.git] / arch / x86 / mm / pageattr_64.c
blob e1c860800ff15963566962f5e7f0448e0755b9dd
/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */

#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
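
/*
 * Flush a range of kernel addresses out of the CPU caches, one cache
 * line at a time, using clflush.
 */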
void clflush_cache_range(void *addr, int size)
{
	int i;

	for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size)
		clflush(addr+i);
}

#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
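
/*
 * Look up the kernel page table entry for a virtual address. Returns a
 * pointer to the (possibly large-page) entry and sets *level to 3 for a
 * 2M/4M mapping or to 4 for a regular 4k pte.
 */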
pte_t *lookup_address(unsigned long address, int *level)
{
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;
	*level = 3;
	if (pmd_large(*pmd))
		return (pte_t *)pmd;
	*level = 4;

	return pte_offset_kernel(pmd, address);
}
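
/*
 * Set a pmd-level kernel pte in init_mm. On 32-bit kernels without a
 * shared kernel PMD, the update also has to be replicated into the
 * corresponding pmd entry of every pagetable on the pgd_list.
 */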
static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
	/* change init_mm */
	set_pte_atomic(kpte, pte);
#ifdef CONFIG_X86_32
	if (SHARED_KERNEL_PMD)
		return;
	{
		struct page *page;

		for (page = pgd_list; page; page = (struct page *)page->index) {
			pgd_t *pgd;
			pud_t *pud;
			pmd_t *pmd;

			pgd = (pgd_t *)page_address(page) + pgd_index(address);
			pud = pud_offset(pgd, address);
			pmd = pmd_offset(pud, address);
			set_pte_atomic((pte_t *)pmd, pte);
		}
	}
#endif
}
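
/*
 * Split a kernel large (2M/4M) page mapping into a full page of 4k ptes
 * that all carry the protection bits of the original large page.
 */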
static int split_large_page(pte_t *kpte, unsigned long address)
{
	pgprot_t ref_prot = pte_pgprot(pte_clrhuge(*kpte));
	gfp_t gfp_flags = GFP_KERNEL;
	unsigned long flags;
	unsigned long addr;
	pte_t *pbase, *tmp;
	struct page *base;
	int i, level;

#ifdef CONFIG_DEBUG_PAGEALLOC
	gfp_flags = GFP_ATOMIC;
#endif
	base = alloc_pages(gfp_flags, 0);
	if (!base)
		return -ENOMEM;

	spin_lock_irqsave(&pgd_lock, flags);
	/*
	 * Check for races, another CPU might have split this page
	 * up for us already:
	 */
	tmp = lookup_address(address, &level);
	if (tmp != kpte) {
		WARN_ON_ONCE(1);
		goto out_unlock;
	}

	address = __pa(address);
	addr = address & LARGE_PAGE_MASK;
	pbase = (pte_t *)page_address(base);
#ifdef CONFIG_X86_32
	paravirt_alloc_pt(&init_mm, page_to_pfn(base));
#endif

	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE)
		set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT, ref_prot));

	/*
	 * Install the new, split up pagetable:
	 */
	__set_pmd_pte(kpte, address, mk_pte(base, ref_prot));
	base = NULL;

out_unlock:
	spin_unlock_irqrestore(&pgd_lock, flags);

	if (base)
		__free_pages(base, 0);

	return 0;
}
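
/*
 * Change the attributes of a single 4k page in the kernel page tables.
 * If the address is still covered by a large page, split it up first
 * and retry.
 */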
static int
__change_page_attr(unsigned long address, struct page *page, pgprot_t prot)
{
	struct page *kpte_page;
	int level, err = 0;
	pte_t *kpte;

	BUG_ON(PageHighMem(page));

repeat:
	kpte = lookup_address(address, &level);
	if (!kpte)
		return -EINVAL;

	kpte_page = virt_to_page(kpte);
	BUG_ON(PageLRU(kpte_page));
	BUG_ON(PageCompound(kpte_page));

	/*
	 * Better fail early if someone sets the kernel text to NX.
	 * Does not cover __inittext
	 */
	BUG_ON(address >= (unsigned long)&_text &&
		address < (unsigned long)&_etext &&
		(pgprot_val(prot) & _PAGE_NX));

	if (level == 4) {
		set_pte_atomic(kpte, mk_pte(page, canon_pgprot(prot)));
	} else {
		err = split_large_page(kpte, address);
		if (!err)
			goto repeat;
	}
	return err;
}

/**
 * change_page_attr_addr - Change page table attributes in linear mapping
 * @address: Virtual address in linear mapping.
 * @numpages: Number of pages to change
 * @prot: New page table attribute (PAGE_*)
 *
 * Change page attributes of a page in the direct mapping. This is a variant
 * of change_page_attr() that also works on memory holes that do not have a
 * mem_map entry (pfn_valid() is false).
 *
 * See change_page_attr() documentation for more details.
 */
int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
{
	int err = 0, kernel_map = 0, i;

#ifdef CONFIG_X86_64
	if (address >= __START_KERNEL_map &&
			address < __START_KERNEL_map + KERNEL_TEXT_SIZE) {

		address = (unsigned long)__va(__pa(address));
		kernel_map = 1;
	}
#endif

	for (i = 0; i < numpages; i++, address += PAGE_SIZE) {
		unsigned long pfn = __pa(address) >> PAGE_SHIFT;

		if (!kernel_map || pte_present(pfn_pte(0, prot))) {
			err = __change_page_attr(address, pfn_to_page(pfn), prot);
			if (err)
				break;
		}
#ifdef CONFIG_X86_64
		/*
		 * Handle kernel mapping too which aliases part of
		 * lowmem:
		 */
		if (__pa(address) < KERNEL_TEXT_SIZE) {
			unsigned long addr2;
			pgprot_t prot2;

			addr2 = __START_KERNEL_map + __pa(address);
			/* Make sure the kernel mappings stay executable */
			prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
			err = __change_page_attr(addr2, pfn_to_page(pfn), prot2);
		}
#endif
	}

	return err;
}
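
/*
 * Illustrative usage sketch (not part of the original file): for a physical
 * range without a mem_map entry, the address-based variant is applied to the
 * corresponding direct-mapping address. "phys" and "npages" are hypothetical
 * caller-supplied values.
 *
 *	err = change_page_attr_addr((unsigned long)__va(phys), npages,
 *				    PAGE_KERNEL_NOCACHE);
 *	if (!err)
 *		global_flush_tlb();
 */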

/**
 * change_page_attr - Change page table attributes in the linear mapping.
 * @page: First page to change
 * @numpages: Number of pages to change
 * @prot: New protection/caching type (PAGE_*)
 *
 * Returns 0 on success, otherwise a negated errno.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes of the
 * in-kernel linear mapping too.
 *
 * Caller must call global_flush_tlb() later to make the changes active.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere
 * (e.g. in user space). This function only deals with the kernel linear map.
 *
 * For MMIO areas without mem_map use change_page_attr_addr() instead.
 */
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
	unsigned long addr = (unsigned long)page_address(page);

	return change_page_attr_addr(addr, numpages, prot);
}
EXPORT_SYMBOL(change_page_attr);
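
/*
 * Illustrative usage sketch (not part of the original file): a driver that
 * needs a buffer mapped uncached would typically change the linear-map
 * attributes and flush, then restore them before freeing the pages.
 * "pages" and "nr_pages" are hypothetical driver variables.
 *
 *	if (change_page_attr(pages, nr_pages, PAGE_KERNEL_NOCACHE))
 *		goto error;
 *	global_flush_tlb();
 *	...
 *	change_page_attr(pages, nr_pages, PAGE_KERNEL);
 *	global_flush_tlb();
 */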

static void flush_kernel_map(void *arg)
{
	/*
	 * Flush all to work around errata in early Athlons regarding
	 * large page flushing.
	 */
	__flush_tlb_all();

	if (boot_cpu_data.x86_model >= 4)
		wbinvd();
}
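
/*
 * Flush the TLB (and, on newer CPU models, the caches) on every CPU to
 * make prior attribute changes visible. Must not be called with
 * interrupts disabled.
 */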
void global_flush_tlb(void)
{
	BUG_ON(irqs_disabled());

	on_each_cpu(flush_kernel_map, NULL, 1, 1);
}
EXPORT_SYMBOL(global_flush_tlb);

#ifdef CONFIG_DEBUG_PAGEALLOC
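/*
 * CONFIG_DEBUG_PAGEALLOC: map freshly allocated pages into, and unmap
 * freed pages out of, the kernel linear mapping so that stray accesses
 * to freed memory fault immediately.
 */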
void kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (PageHighMem(page))
		return;
	if (!enable) {
		debug_check_no_locks_freed(page_address(page),
					   numpages * PAGE_SIZE);
	}

	/*
	 * If the page allocator is not up yet, do not call c_p_a():
	 */
	if (!debug_pagealloc_enabled)
		return;

	/*
	 * The return value is ignored - the calls cannot fail, because
	 * large pages are disabled at boot time:
	 */
	change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));

	/*
	 * We should perform an IPI and flush all TLBs, but that can
	 * deadlock - so flush only the current CPU's TLB:
	 */
	__flush_tlb_all();
}
#endif