/*
 * linux/mm/fremap.c
 *
 * Explicit pagetable population and nonlinear (random) mappings support.
 *
 * started by Ingo Molnar, Copyright (C) 2002, 2003
 */
#include <linux/backing-dev.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/rmap.h>
#include <linux/module.h>
#include <linux/syscalls.h>

#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
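
/*
 * Clear the pte at @ptep and release whatever it referenced: a present
 * page loses its rmap and rss accounting, a non-file non-present entry
 * drops its swap reference, and a file pte is simply cleared.
 */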
static void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	if (pte_present(pte)) {
		struct page *page;

		flush_cache_page(vma, addr, pte_pfn(pte));
		pte = ptep_clear_flush(vma, addr, ptep);
		page = vm_normal_page(vma, addr, pte);
		if (page) {
			if (pte_dirty(pte))
				set_page_dirty(page);
			page_remove_rmap(page, vma);
			page_cache_release(page);
			update_hiwater_rss(mm);
			dec_mm_counter(mm, file_rss);
		}
	} else {
		if (!pte_file(pte))
			free_swap_and_cache(pte_to_swp_entry(pte));
		pte_clear_not_present_full(mm, addr, ptep, 0);
	}
}

/*
 * Install a file pte to a given virtual memory address, release any
 * previously existing mapping.
 */
static int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long addr, unsigned long pgoff, pgprot_t prot)
{
	int err = -ENOMEM;
	pte_t *pte;
	spinlock_t *ptl;

	pte = get_locked_pte(mm, addr, &ptl);
	if (!pte)
		goto out;

	if (!pte_none(*pte))
		zap_pte(mm, vma, addr, pte);

	set_pte_at(mm, addr, pte, pgoff_to_pte(pgoff));
	/*
	 * We don't need to run update_mmu_cache() here because the "file pte"
	 * being installed by install_file_pte() is not a real pte - it's a
	 * non-present entry (like a swap entry), noting what file offset should
	 * be mapped there when there's a fault (in a non-linear vma where
	 * that's not obvious).
	 */
	pte_unmap_unlock(pte, ptl);
	err = 0;
out:
	return err;
}
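
/*
 * Install a file pte for each page in [addr, addr + size); the caller
 * guarantees that addr and size are page-aligned and size is non-zero.
 */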
static int populate_range(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, unsigned long size, pgoff_t pgoff)
{
	int err;

	do {
		err = install_file_pte(mm, vma, addr, pgoff, vma->vm_page_prot);
		if (err)
			return err;

		size -= PAGE_SIZE;
		addr += PAGE_SIZE;
		pgoff++;
	} while (size);

	return 0;
}

/**
 * sys_remap_file_pages - remap arbitrary pages of an existing VM_SHARED vma
 * @start: start of the remapped virtual memory range
 * @size: size of the remapped virtual memory range
 * @prot: new protection bits of the range (see NOTE)
 * @pgoff: to-be-mapped page of the backing store file
 * @flags: 0 or MAP_NONBLOCK - the latter will cause no IO.
 *
 * sys_remap_file_pages remaps arbitrary pages of an existing VM_SHARED vma
 * (shared backing store file).
 *
 * This syscall works purely via pagetables, so it's the most efficient
 * way to map the same (large) file into a given virtual window. Unlike
 * mmap()/mremap() it does not create any new vmas. The new mappings are
 * also safe across swapout.
 *
 * NOTE: the 'prot' parameter right now is ignored (but must be zero),
 * and the vma's default protection is used. Arbitrary protections
 * might be implemented in the future.
 */
asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size,
	unsigned long prot, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct address_space *mapping;
	unsigned long end = start + size;
	struct vm_area_struct *vma;
	int err = -EINVAL;
	int has_write_lock = 0;

	if (prot)
		return err;
	/*
	 * Sanitize the syscall parameters:
	 */
	start = start & PAGE_MASK;
	size = size & PAGE_MASK;

	/* Does the address range wrap, or is the span zero-sized? */
	if (start + size <= start)
		return err;

	/* Can we represent this offset inside this architecture's pte's? */
#if PTE_FILE_MAX_BITS < BITS_PER_LONG
	if (pgoff + (size >> PAGE_SHIFT) >= (1UL << PTE_FILE_MAX_BITS))
		return err;
#endif

	/* We need down_write() to change vma->vm_flags. */
	down_read(&mm->mmap_sem);
retry:
	vma = find_vma(mm, start);

	/*
	 * Make sure the vma is shared, that it supports prefaulting,
	 * and that the remapped range is valid and fully within
	 * the single existing vma. vm_private_data is used as a
	 * swapout cursor in a VM_NONLINEAR vma.
	 */
	if (!vma || !(vma->vm_flags & VM_SHARED))
		goto out;

	if (vma->vm_private_data && !(vma->vm_flags & VM_NONLINEAR))
		goto out;

	if (!(vma->vm_flags & VM_CAN_NONLINEAR))
		goto out;

	if (end <= start || start < vma->vm_start || end > vma->vm_end)
		goto out;

	/* Must set VM_NONLINEAR before any pages are populated. */
	if (!(vma->vm_flags & VM_NONLINEAR)) {
		/* Don't need a nonlinear mapping, exit success */
		if (pgoff == linear_page_index(vma, start)) {
			err = 0;
			goto out;
		}
		if (!has_write_lock) {
			up_read(&mm->mmap_sem);
			down_write(&mm->mmap_sem);
			has_write_lock = 1;
			goto retry;
		}
		mapping = vma->vm_file->f_mapping;
		/*
		 * page_mkclean doesn't work on nonlinear vmas, so if
		 * dirty pages need to be accounted, emulate with linear
		 * vmas.
		 */
		if (mapping_cap_account_dirty(mapping)) {
			unsigned long addr;

			flags &= MAP_NONBLOCK;
			addr = mmap_region(vma->vm_file, start, size,
					flags, vma->vm_flags, pgoff, 1);
			if (IS_ERR_VALUE(addr)) {
				err = addr;
			} else {
				BUG_ON(addr != start);
				err = 0;
			}
			goto out;
		}
		spin_lock(&mapping->i_mmap_lock);
		flush_dcache_mmap_lock(mapping);
		vma->vm_flags |= VM_NONLINEAR;
		vma_prio_tree_remove(vma, &mapping->i_mmap);
		vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
		flush_dcache_mmap_unlock(mapping);
		spin_unlock(&mapping->i_mmap_lock);
	}
	err = populate_range(mm, vma, start, size, pgoff);
	if (!err && !(flags & MAP_NONBLOCK)) {
		if (unlikely(has_write_lock)) {
			downgrade_write(&mm->mmap_sem);
			has_write_lock = 0;
		}
		make_pages_present(start, start + size);
	}

	/*
	 * We can't clear VM_NONLINEAR because we'd have to do
	 * it after ->populate completes, and that would prevent
	 * downgrading the lock. (Locks can't be upgraded).
	 */

out:
	if (likely(!has_write_lock))
		up_read(&mm->mmap_sem);
	else
		up_write(&mm->mmap_sem);

	return err;
}
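
/*
 * Editorial note, not part of the original file: a minimal userspace sketch
 * of how this syscall is driven, matching the semantics documented above.
 * It assumes a readable file "data" at least four pages long; the filename
 * and sizes are illustrative and error checking is omitted.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	int fd = open("data", O_RDONLY);
 *	long page = sysconf(_SC_PAGESIZE);
 *
 *	// The window must be MAP_SHARED, or the VM_SHARED check above fails.
 *	char *win = mmap(NULL, 4 * page, PROT_READ, MAP_SHARED, fd, 0);
 *
 *	// Rebind the window's first page to file page 3: prot must be 0,
 *	// pgoff is in units of the page size, and no new vma is created.
 *	remap_file_pages(win, page, 0, 3, 0);
 */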