/*
 *      linux/mm/filemap_xip.c
 *
 * Copyright (C) 2005 IBM Corporation
 * Author: Carsten Otte <cotte@de.ibm.com>
 *
 * derived from linux/mm/filemap.c - Copyright (C) Linus Torvalds
 *
 */
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/uio.h>
#include <linux/rmap.h>
#include <linux/mmu_notifier.h>
#include <linux/sched.h>
#include <linux/seqlock.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
/*
 * We do use our own empty page to avoid interference with other users
 * of ZERO_PAGE(), such as /dev/zero
 */
static DEFINE_MUTEX(xip_sparse_mutex);
static seqcount_t xip_sparse_seq = SEQCNT_ZERO;
static struct page *__xip_sparse_page;
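
/*
 * Locking scheme: xip_sparse_mutex serializes allocation of the sparse
 * page and instantiation of new blocks; xip_sparse_seq lets
 * __xip_unmap() detect a fault that raced with its walk and mapped the
 * sparse page behind its back.
 */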
/* called under xip_sparse_mutex */
static struct page *xip_sparse_page(void)
{
        if (!__xip_sparse_page) {
                struct page *page = alloc_page(GFP_HIGHUSER | __GFP_ZERO);

                if (page)
                        __xip_sparse_page = page;
        }
        return __xip_sparse_page;
}
/*
 * This is a file read routine for execute in place files, and uses
 * the mapping->a_ops->get_xip_mem() function for the actual low-level
 * stuff.
 *
 * Note the struct file* is not used at all.  It may be NULL.
 */
static ssize_t
do_xip_mapping_read(struct address_space *mapping,
                    struct file_ra_state *_ra,
                    struct file *filp,
                    char __user *buf,
                    size_t len,
                    loff_t *ppos)
{
        struct inode *inode = mapping->host;
        pgoff_t index, end_index;
        unsigned long offset;
        loff_t isize, pos;
        size_t copied = 0, error = 0;

        BUG_ON(!mapping->a_ops->get_xip_mem);

        pos = *ppos;
        index = pos >> PAGE_CACHE_SHIFT;
        offset = pos & ~PAGE_CACHE_MASK;

        isize = i_size_read(inode);
        if (!isize)
                goto out;

        end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
        do {
                unsigned long nr, left;
                void *xip_mem;
                unsigned long xip_pfn;
                int zero = 0;

                /* nr is the maximum number of bytes to copy from this page */
                nr = PAGE_CACHE_SIZE;
                if (index >= end_index) {
                        if (index > end_index)
                                goto out;
                        nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
                        if (nr <= offset) {
                                goto out;
                        }
                }
                nr = nr - offset;
                if (nr > len - copied)
                        nr = len - copied;

                error = mapping->a_ops->get_xip_mem(mapping, index, 0,
                                                        &xip_mem, &xip_pfn);
                if (unlikely(error)) {
                        if (error == -ENODATA) {
                                /* sparse */
                                zero = 1;
                        } else
                                goto out;
                }

                /* If users can be writing to this page using arbitrary
                 * virtual addresses, take care about potential aliasing
                 * before reading the page on the kernel side.
                 */
                if (mapping_writably_mapped(mapping))
                        /* address based flush */ ;
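                /*
                 * (The empty statement above is deliberate: no
                 * address-based cache flush is currently implemented
                 * for XIP mappings.)
                 */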
                /*
                 * Ok, we have the mem, so now we can copy it to user space...
                 *
                 * The actor routine returns how many bytes were actually used..
                 * NOTE! This may not be the same as how much of a user buffer
                 * we filled up (we may be padding etc), so we can only update
                 * "pos" here (the actor routine has to update the user buffer
                 * pointers and the remaining count).
                 */
                if (!zero)
                        left = __copy_to_user(buf+copied, xip_mem+offset, nr);
                else
                        left = __clear_user(buf + copied, nr);

                if (left) {
                        error = -EFAULT;
                        goto out;
                }

                copied += (nr - left);
                offset += (nr - left);
                index += offset >> PAGE_CACHE_SHIFT;
                offset &= ~PAGE_CACHE_MASK;
        } while (copied < len);
out:
        *ppos = pos + copied;
        if (filp)
                file_accessed(filp);

        return (copied ? copied : error);
}
ssize_t
xip_file_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
{
        if (!access_ok(VERIFY_WRITE, buf, len))
                return -EFAULT;

        return do_xip_mapping_read(filp->f_mapping, &filp->f_ra, filp,
                                   buf, len, ppos);
}
EXPORT_SYMBOL_GPL(xip_file_read);
/*
 * __xip_unmap is invoked from xip_unmap and
 * xip_write
 *
 * This function walks all vmas of the address_space and unmaps the
 * __xip_sparse_page when found at pgoff.
 */
static void
__xip_unmap (struct address_space * mapping,
                     unsigned long pgoff)
{
        struct vm_area_struct *vma;
        struct mm_struct *mm;
        struct prio_tree_iter iter;
        unsigned long address;
        pte_t *pte;
        pte_t pteval;
        spinlock_t *ptl;
        struct page *page;
        unsigned count;
        int locked = 0;

        count = read_seqcount_begin(&xip_sparse_seq);

        page = __xip_sparse_page;
        if (!page)
                return;
retry:
        mutex_lock(&mapping->i_mmap_mutex);
        vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
                mm = vma->vm_mm;
                address = vma->vm_start +
                        ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
                BUG_ON(address < vma->vm_start || address >= vma->vm_end);
                pte = page_check_address(page, mm, address, &ptl, 1);
                if (pte) {
                        /* Nuke the page table entry. */
                        flush_cache_page(vma, address, pte_pfn(*pte));
                        pteval = ptep_clear_flush_notify(vma, address, pte);
                        page_remove_rmap(page);
                        dec_mm_counter(mm, MM_FILEPAGES);
                        BUG_ON(pte_dirty(pteval));
                        pte_unmap_unlock(pte, ptl);
                        page_cache_release(page);
                }
        }
        mutex_unlock(&mapping->i_mmap_mutex);
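
        /*
         * A fault may have raced with the walk above and mapped the
         * sparse page into a vma we had already visited.  If the
         * seqcount changed, take xip_sparse_mutex to hold off further
         * faults and walk the tree once more.
         */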
        if (locked) {
                mutex_unlock(&xip_sparse_mutex);
        } else if (read_seqcount_retry(&xip_sparse_seq, count)) {
                mutex_lock(&xip_sparse_mutex);
                locked = 1;
                goto retry;
        }
}
/*
 * xip_fault() is invoked via the vma operations vector for a
 * mapped memory region to read in file data during a page fault.
 *
 * This function is derived from filemap_fault, but used for execute in place
 */
static int xip_file_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct file *file = vma->vm_file;
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;
        pgoff_t size;
        void *xip_mem;
        unsigned long xip_pfn;
        struct page *page;
        int error;

        /* XXX: are VM_FAULT_ codes OK? */
again:
        size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
        if (vmf->pgoff >= size)
                return VM_FAULT_SIGBUS;

        error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 0,
                                                &xip_mem, &xip_pfn);
        if (likely(!error))
                goto found;
        if (error != -ENODATA)
                return VM_FAULT_OOM;
        /* sparse block */
        if ((vma->vm_flags & (VM_WRITE | VM_MAYWRITE)) &&
            (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) &&
            (!(mapping->host->i_sb->s_flags & MS_RDONLY))) {
                int err;

                /* maybe shared writable, allocate new block */
                mutex_lock(&xip_sparse_mutex);
                error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 1,
                                                        &xip_mem, &xip_pfn);
                mutex_unlock(&xip_sparse_mutex);
                if (error)
                        return VM_FAULT_SIGBUS;
                /* unmap sparse mappings at pgoff from all other vmas */
                __xip_unmap(mapping, vmf->pgoff);
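
                /*
                 * Both the fast path above and a freshly allocated
                 * block land here: insert the raw pfn into the page
                 * tables.  vm_insert_mixed() is used because XIP
                 * memory need not be backed by a struct page.
                 */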
found:
                err = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
                                                        xip_pfn);
                if (err == -ENOMEM)
                        return VM_FAULT_OOM;
                BUG_ON(err);
                return VM_FAULT_NOPAGE;
        } else {
                int err, ret = VM_FAULT_OOM;
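
                /*
                 * Enter a seqcount write section so that a concurrent
                 * __xip_unmap() notices this fault and retries its
                 * walk under xip_sparse_mutex.
                 */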
                mutex_lock(&xip_sparse_mutex);
                write_seqcount_begin(&xip_sparse_seq);
                error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 0,
                                                        &xip_mem, &xip_pfn);
                if (unlikely(!error)) {
                        write_seqcount_end(&xip_sparse_seq);
                        mutex_unlock(&xip_sparse_mutex);
                        goto again;
                }
                if (error != -ENODATA)
                        goto out;
                /* not shared and writable, use xip_sparse_page() */
                page = xip_sparse_page();
                if (!page)
                        goto out;
                err = vm_insert_page(vma, (unsigned long)vmf->virtual_address,
                                                        page);
                if (err == -ENOMEM)
                        goto out;

                ret = VM_FAULT_NOPAGE;
out:
                write_seqcount_end(&xip_sparse_seq);
                mutex_unlock(&xip_sparse_mutex);

                return ret;
        }
}
static const struct vm_operations_struct xip_file_vm_ops = {
        .fault  = xip_file_fault,
};
int xip_file_mmap(struct file * file, struct vm_area_struct * vma)
{
        BUG_ON(!file->f_mapping->a_ops->get_xip_mem);

        file_accessed(file);
        vma->vm_ops = &xip_file_vm_ops;
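        /*
         * VM_MIXEDMAP because the fault handler inserts raw pfns via
         * vm_insert_mixed(); XIP memory need not have struct pages.
         */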
        vma->vm_flags |= VM_CAN_NONLINEAR | VM_MIXEDMAP;
        return 0;
}
EXPORT_SYMBOL_GPL(xip_file_mmap);
static ssize_t
__xip_file_write(struct file *filp, const char __user *buf,
                  size_t count, loff_t pos, loff_t *ppos)
{
        struct address_space * mapping = filp->f_mapping;
        const struct address_space_operations *a_ops = mapping->a_ops;
        struct inode *inode = mapping->host;
        long status = 0;
        size_t bytes;
        ssize_t written = 0;

        BUG_ON(!mapping->a_ops->get_xip_mem);
        do {
                unsigned long index;
                unsigned long offset;
                size_t copied;
                void *xip_mem;
                unsigned long xip_pfn;

                offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
                index = pos >> PAGE_CACHE_SHIFT;
                bytes = PAGE_CACHE_SIZE - offset;
                if (bytes > count)
                        bytes = count;

                status = a_ops->get_xip_mem(mapping, index, 0,
                                                &xip_mem, &xip_pfn);
                if (status == -ENODATA) {
                        /* we allocate a new page and unmap it */
                        mutex_lock(&xip_sparse_mutex);
                        status = a_ops->get_xip_mem(mapping, index, 1,
                                                        &xip_mem, &xip_pfn);
                        mutex_unlock(&xip_sparse_mutex);
                        if (!status)
                                /* unmap page at pgoff from all other vmas */
                                __xip_unmap(mapping, index);
                }

                if (status)
                        break;
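
                /*
                 * Copy straight into the backing memory; the nocache
                 * variant avoids filling the CPU cache with data that
                 * is written once and not read back here.
                 */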
                copied = bytes -
                        __copy_from_user_nocache(xip_mem + offset, buf, bytes);

                if (likely(copied > 0)) {
                        status = copied;

                        if (status >= 0) {
                                written += status;
                                count -= status;
                                pos += status;
                                buf += status;
                        }
                }
                if (unlikely(copied != bytes))
                        if (status >= 0)
                                status = -EFAULT;

                if (status < 0)
                        break;
        } while (count);
        *ppos = pos;
        /*
         * No need to use i_size_read() here, the i_size
         * cannot change under us because we hold i_mutex.
         */
        if (pos > inode->i_size) {
                i_size_write(inode, pos);
                mark_inode_dirty(inode);
        }

        return written ? written : status;
}
ssize_t
xip_file_write(struct file *filp, const char __user *buf, size_t len,
               loff_t *ppos)
{
        struct address_space *mapping = filp->f_mapping;
        struct inode *inode = mapping->host;
        size_t count;
        loff_t pos;
        ssize_t ret;

        mutex_lock(&inode->i_mutex);

        if (!access_ok(VERIFY_READ, buf, len)) {
                ret = -EFAULT;
                goto out_up;
        }

        pos = *ppos;
        count = len;

        vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

        /* We can write back this queue in page reclaim */
        current->backing_dev_info = mapping->backing_dev_info;

        ret = generic_write_checks(filp, &pos, &count, S_ISBLK(inode->i_mode));
        if (ret)
                goto out_backing;
        if (count == 0)
                goto out_backing;

        ret = file_remove_suid(filp);
        if (ret)
                goto out_backing;

        file_update_time(filp);

        ret = __xip_file_write(filp, buf, count, pos, ppos);

 out_backing:
        current->backing_dev_info = NULL;
 out_up:
        mutex_unlock(&inode->i_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(xip_file_write);
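
/*
 * Example (a sketch, not part of this file): an XIP-capable filesystem,
 * e.g. ext2 built with CONFIG_EXT2_FS_XIP, wires these helpers into its
 * file_operations roughly as below; the "myfs" names are illustrative.
 *
 *      static const struct file_operations myfs_xip_file_operations = {
 *              .llseek         = generic_file_llseek,
 *              .read           = xip_file_read,
 *              .write          = xip_file_write,
 *              .mmap           = xip_file_mmap,
 *              .fsync          = generic_file_fsync,
 *      };
 *
 * The mapping's address_space_operations must provide get_xip_mem(),
 * which translates a page offset into a kernel virtual address and pfn
 * for the backing memory, optionally allocating a block when "create"
 * is set.
 */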
/*
 * truncate a page used for execute in place
 * functionality is analogous to block_truncate_page but uses
 * get_xip_mem to get the page instead of the page cache
 */
int
xip_truncate_page(struct address_space *mapping, loff_t from)
{
        pgoff_t index = from >> PAGE_CACHE_SHIFT;
        unsigned offset = from & (PAGE_CACHE_SIZE-1);
        unsigned blocksize;
        unsigned length;
        void *xip_mem;
        unsigned long xip_pfn;
        int err;

        BUG_ON(!mapping->a_ops->get_xip_mem);

        blocksize = 1 << mapping->host->i_blkbits;
        length = offset & (blocksize - 1);

        /* Block boundary? Nothing to do */
        if (!length)
                return 0;

        length = blocksize - length;
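
        /*
         * Zero the tail of the block that "from" falls in, so stale
         * data past the new end of file is not left visible through
         * the direct mapping.
         */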
        err = mapping->a_ops->get_xip_mem(mapping, index, 0,
                                                &xip_mem, &xip_pfn);
        if (unlikely(err)) {
                if (err == -ENODATA)
                        /* Hole? No need to truncate */
                        return 0;
                else
                        return err;
        }
        memset(xip_mem + offset, 0, length);
        return 0;
}
EXPORT_SYMBOL_GPL(xip_truncate_page);