/*
 *	linux/mm/filemap_xip.c
 *
 * Copyright (C) 2005 IBM Corporation
 * Author: Carsten Otte <cotte@de.ibm.com>
 *
 * derived from linux/mm/filemap.c - Copyright (C) Linus Torvalds
 */
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/uio.h>
#include <linux/rmap.h>
#include <asm/tlbflush.h>
#include "filemap.h"
/*
 * This is a file read routine for execute in place files, and uses
 * the mapping->a_ops->get_xip_page() function for the actual low-level
 * stuff.
 *
 * Note the struct file* is not used at all.  It may be NULL.
 *
 * (An illustrative sketch of the get_xip_page() contract follows this
 * function.)
 */
static void
do_xip_mapping_read(struct address_space *mapping,
		    struct file_ra_state *_ra,
		    struct file *filp,
		    loff_t *ppos,
		    read_descriptor_t *desc,
		    read_actor_t actor)
{
	struct inode *inode = mapping->host;
	unsigned long index, end_index, offset;
	loff_t isize;

	BUG_ON(!mapping->a_ops->get_xip_page);

	index = *ppos >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;

	isize = i_size_read(inode);
	if (!isize)
		goto out;

	end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
	for (;;) {
		struct page *page;
		unsigned long nr, ret;

		/* nr is the maximum number of bytes to copy from this page */
		nr = PAGE_CACHE_SIZE;
		if (index >= end_index) {
			if (index > end_index)
				goto out;
			nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (nr <= offset)
				goto out;
		}
		nr = nr - offset;
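		/*
		 * get_xip_page() takes its offset in 512-byte sector units,
		 * hence the PAGE_SIZE/512 scaling of the page index below.
		 */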
		page = mapping->a_ops->get_xip_page(mapping,
			index*(PAGE_SIZE/512), 0);
		if (!page)
			goto no_xip_page;
		if (unlikely(IS_ERR(page))) {
			if (PTR_ERR(page) == -ENODATA) {
				/* sparse */
				page = ZERO_PAGE(0);
			} else {
				desc->error = PTR_ERR(page);
				goto out;
			}
		}
		/* If users can be writing to this page using arbitrary
		 * virtual addresses, take care about potential aliasing
		 * before reading the page on the kernel side.
		 */
		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);
		/*
		 * Ok, we have the page, so now we can copy it to user space...
		 *
		 * The actor routine returns how many bytes were actually used..
		 * NOTE! This may not be the same as how much of a user buffer
		 * we filled up (we may be padding etc), so we can only update
		 * "pos" here (the actor routine has to update the user buffer
		 * pointers and the remaining count).
		 */
		ret = actor(desc, page, offset, nr);
		offset += ret;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;

		if (ret == nr && desc->count)
			continue;
		goto out;
no_xip_page:
		/* Did not get the page.  Report it */
		desc->error = -EIO;
		goto out;
	}

out:
	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
	if (filp)
		file_accessed(filp);
}
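/*
 * Illustrative sketch (not part of this file): the get_xip_page()
 * contract that do_xip_mapping_read() and the other routines here
 * rely on.  The sector argument is in 512-byte units and the third
 * argument asks the filesystem to allocate a backing block; the
 * myfs_* names below are hypothetical:
 *
 *	static struct page *
 *	myfs_get_xip_page(struct address_space *mapping, sector_t sector,
 *			  int create)
 *	{
 *		unsigned long pfn;
 *
 *		// resolve (and, if create != 0, allocate) the block
 *		if (myfs_sector_to_pfn(mapping->host, sector, create, &pfn))
 *			return ERR_PTR(-ENODATA);  // hole in a sparse file
 *		return pfn_to_page(pfn);
 *	}
 *
 * The read path above maps a NULL return to -EIO, and treats
 * ERR_PTR(-ENODATA) as an unallocated (sparse) block backed by the
 * ZERO_PAGE.
 */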
ssize_t
xip_file_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
{
	read_descriptor_t desc;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	desc.written = 0;
	desc.arg.buf = buf;
	desc.count = len;
	desc.error = 0;

	do_xip_mapping_read(filp->f_mapping, &filp->f_ra, filp,
			    ppos, &desc, file_read_actor);

	if (desc.written)
		return desc.written;
	else
		return desc.error;
}
EXPORT_SYMBOL_GPL(xip_file_read);
ssize_t
xip_file_sendfile(struct file *in_file, loff_t *ppos,
		  size_t count, read_actor_t actor, void *target)
{
	read_descriptor_t desc;

	if (!count)
		return 0;

	desc.written = 0;
	desc.count = count;
	desc.arg.data = target;
	desc.error = 0;

	do_xip_mapping_read(in_file->f_mapping, &in_file->f_ra, in_file,
			    ppos, &desc, actor);
	if (desc.written)
		return desc.written;
	return desc.error;
}
EXPORT_SYMBOL_GPL(xip_file_sendfile);
/*
 * __xip_unmap is invoked from xip_file_nopage and
 * __xip_file_write
 *
 * This function walks all vmas of the address_space and unmaps the
 * ZERO_PAGE when found at pgoff. Should it go in rmap.c?
 */
static void
__xip_unmap(struct address_space *mapping,
	    unsigned long pgoff)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	struct prio_tree_iter iter;
	unsigned long address;
	pte_t *pte;
	pte_t pteval;

	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		mm = vma->vm_mm;
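		/* translate the file offset pgoff into the user virtual
		 * address it occupies in this vma */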
		address = vma->vm_start +
			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		BUG_ON(address < vma->vm_start || address >= vma->vm_end);
		/*
		 * We need the page_table_lock to protect us from page faults,
		 * munmap, fork, etc...
		 */
		pte = page_check_address(ZERO_PAGE(address), mm,
					 address);
		if (!IS_ERR(pte)) {
			/* Nuke the page table entry. */
			flush_cache_page(vma, address, pte_pfn(*pte));
			pteval = ptep_clear_flush(vma, address, pte);
			BUG_ON(pte_dirty(pteval));
			pte_unmap(pte);
			spin_unlock(&mm->page_table_lock);
		}
	}
	spin_unlock(&mapping->i_mmap_lock);
}
/*
 * xip_file_nopage() is invoked via the vma operations vector for a
 * mapped memory region to read in file data during a page fault.
 *
 * This function is derived from filemap_nopage, but is used for
 * execute in place files.
 */
static struct page *
xip_file_nopage(struct vm_area_struct *area,
		unsigned long address,
		int *type)
{
	struct file *file = area->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct page *page;
	unsigned long size, pgoff, endoff;

	pgoff = ((address - area->vm_start) >> PAGE_CACHE_SHIFT)
		+ area->vm_pgoff;
	endoff = ((area->vm_end - area->vm_start) >> PAGE_CACHE_SHIFT)
		+ area->vm_pgoff;

	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (pgoff >= size)
		return NULL;

	page = mapping->a_ops->get_xip_page(mapping, pgoff*(PAGE_SIZE/512), 0);
	if (!IS_ERR(page))
		return page;
	if (PTR_ERR(page) != -ENODATA)
		return NULL;

	/* sparse block */
	if ((area->vm_flags & (VM_WRITE | VM_MAYWRITE)) &&
	    (area->vm_flags & (VM_SHARED | VM_MAYSHARE)) &&
	    (!(mapping->host->i_sb->s_flags & MS_RDONLY))) {
		/* maybe shared writable, allocate new block */
		page = mapping->a_ops->get_xip_page(mapping,
			pgoff*(PAGE_SIZE/512), 1);
		if (IS_ERR(page))
			return NULL;
		/* unmap page at pgoff from all other vmas */
		__xip_unmap(mapping, pgoff);
	} else {
		/* not shared and writable, use ZERO_PAGE() */
		page = ZERO_PAGE(address);
	}

	return page;
}
static struct vm_operations_struct xip_file_vm_ops = {
	.nopage = xip_file_nopage,
};
int xip_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	BUG_ON(!file->f_mapping->a_ops->get_xip_page);

	file_accessed(file);
	vma->vm_ops = &xip_file_vm_ops;
	return 0;
}
EXPORT_SYMBOL_GPL(xip_file_mmap);
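/*
 * Usage sketch (illustrative): a filesystem supporting execute in
 * place wires the helpers exported from this file into its
 * file_operations, along the lines of the hypothetical myfs below,
 * and provides get_xip_page() in its address_space_operations:
 *
 *	static struct file_operations myfs_xip_file_operations = {
 *		.llseek		= generic_file_llseek,
 *		.read		= xip_file_read,
 *		.write		= xip_file_write,
 *		.mmap		= xip_file_mmap,
 *		.sendfile	= xip_file_sendfile,
 *	};
 */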
static ssize_t
__xip_file_write(struct file *filp, const char __user *buf,
		 size_t count, loff_t pos, loff_t *ppos)
{
	struct address_space *mapping = filp->f_mapping;
	struct address_space_operations *a_ops = mapping->a_ops;
	struct inode *inode = mapping->host;
	long status = 0;
	struct page *page;
	size_t bytes;
	ssize_t written = 0;

	BUG_ON(!mapping->a_ops->get_xip_page);
	do {
		unsigned long index;
		unsigned long offset;
		size_t copied;

		offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
		index = pos >> PAGE_CACHE_SHIFT;
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 */
		fault_in_pages_readable(buf, bytes);

		page = a_ops->get_xip_page(mapping,
					   index*(PAGE_SIZE/512), 0);
		if (IS_ERR(page) && (PTR_ERR(page) == -ENODATA)) {
			/* sparse block: allocate a new block */
			page = a_ops->get_xip_page(mapping,
						   index*(PAGE_SIZE/512), 1);
			if (!IS_ERR(page))
				/* unmap page at pgoff from all other vmas */
				__xip_unmap(mapping, index);
		}
		if (IS_ERR(page)) {
			status = PTR_ERR(page);
			break;
		}

		copied = filemap_copy_from_user(page, offset, buf, bytes);
		flush_dcache_page(page);
		if (likely(copied > 0)) {
			status = copied;

			if (status >= 0) {
				written += status;
				count -= status;
				pos += status;
				buf += status;
			}
		}
		if (unlikely(copied != bytes))
			if (status >= 0)
				status = -EFAULT;
		if (status < 0)
			break;
	} while (count);
	*ppos = pos;
	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold i_sem.
	 */
	if (pos > inode->i_size) {
		i_size_write(inode, pos);
		mark_inode_dirty(inode);
	}

	return written ? written : status;
}
ssize_t
xip_file_write(struct file *filp, const char __user *buf, size_t len,
	       loff_t *ppos)
{
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = mapping->host;
	size_t count;
	loff_t pos;
	ssize_t ret;

	down(&inode->i_sem);

	if (!access_ok(VERIFY_READ, buf, len)) {
		ret = -EFAULT;
		goto out_up;
	}

	pos = *ppos;
	count = len;

	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

	ret = generic_write_checks(filp, &pos, &count, S_ISBLK(inode->i_mode));
	if (ret)
		goto out_backing;
	if (count == 0)
		goto out_backing;

	ret = remove_suid(filp->f_dentry);
	if (ret)
		goto out_backing;

	inode_update_time(inode, 1);

	ret = __xip_file_write(filp, buf, count, pos, ppos);

out_backing:
	current->backing_dev_info = NULL;
out_up:
	up(&inode->i_sem);
	return ret;
}
EXPORT_SYMBOL_GPL(xip_file_write);
/*
 * Truncate a page used for execute in place.
 * The functionality is analogous to block_truncate_page(), but uses
 * get_xip_page() to obtain the page instead of the page cache.
 */
int
xip_truncate_page(struct address_space *mapping, loff_t from)
{
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	unsigned blocksize;
	unsigned length;
	struct page *page;
	void *kaddr;

	BUG_ON(!mapping->a_ops->get_xip_page);

	blocksize = 1 << mapping->host->i_blkbits;
	length = offset & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;

	length = blocksize - length;

	page = mapping->a_ops->get_xip_page(mapping,
					    index*(PAGE_SIZE/512), 0);
	if (!page)
		return -ENOMEM;
	if (unlikely(IS_ERR(page))) {
		if (PTR_ERR(page) == -ENODATA)
			/* Hole? No need to truncate */
			return 0;
		else
			return PTR_ERR(page);
	}
	kaddr = kmap_atomic(page, KM_USER0);
	memset(kaddr + offset, 0, length);
	kunmap_atomic(kaddr, KM_USER0);

	flush_dcache_page(page);
	return 0;
}
EXPORT_SYMBOL_GPL(xip_truncate_page);
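/*
 * Call-site sketch (illustrative): a filesystem's truncate path would
 * zero the tail of the now-last block before shrinking the file, much
 * like block-based filesystems use block_truncate_page():
 *
 *	error = xip_truncate_page(inode->i_mapping, inode->i_size);
 */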