/*
    FUSE: Filesystem in Userspace
    Copyright (C) 2001-2004  Miklos Szeredi <miklos@szeredi.hu>

    This program can be distributed under the terms of the GNU GPL.
    See the file COPYING.
*/
#include "fuse_i.h"

#include <linux/pagemap.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#ifdef KERNEL_2_6
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#endif

#ifndef KERNEL_2_6
#define PageUptodate(page) Page_Uptodate(page)
#endif
22 static int user_mmap;
23 #ifdef KERNEL_2_6
24 #include <linux/moduleparam.h>
25 module_param(user_mmap, int, 0644);
26 #else
27 MODULE_PARM(user_mmap, "i");
28 #endif
30 MODULE_PARM_DESC(user_mmap, "Allow non root user to create a shared writable mapping");
33 static int fuse_open(struct inode *inode, struct file *file)
35 struct fuse_conn *fc = INO_FC(inode);
36 struct fuse_req *req;
37 struct fuse_open_in inarg;
38 struct fuse_open_out outarg;
39 struct fuse_file *ff;
40 int err;
42 err = generic_file_open(inode, file);
43 if (err)
44 return err;
46 /* If opening the root node, no lookup has been performed on
47 it, so the attributes must be refreshed */
48 if (inode->i_ino == FUSE_ROOT_INO) {
49 int err = fuse_do_getattr(inode);
50 if (err)
51 return err;
54 down(&inode->i_sem);
55 err = -ERESTARTSYS;
56 req = fuse_get_request(fc);
57 if (!req)
58 goto out;
60 err = -ENOMEM;
61 ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL);
62 if (!ff)
63 goto out_put_request;
65 ff->release_req = fuse_request_alloc();
66 if (!ff->release_req) {
67 kfree(ff);
68 goto out_put_request;
72 memset(&inarg, 0, sizeof(inarg));
73 inarg.flags = file->f_flags & ~O_EXCL;
74 req->in.h.opcode = FUSE_OPEN;
75 req->in.h.ino = inode->i_ino;
76 req->in.numargs = 1;
77 req->in.args[0].size = sizeof(inarg);
78 req->in.args[0].value = &inarg;
79 req->out.numargs = 1;
80 req->out.args[0].size = sizeof(outarg);
81 req->out.args[0].value = &outarg;
82 request_send(fc, req);
83 err = req->out.h.error;
84 if (!err && !(fc->flags & FUSE_KERNEL_CACHE)) {
85 #ifdef KERNEL_2_6
86 invalidate_inode_pages(inode->i_mapping);
87 #else
88 invalidate_inode_pages(inode);
89 #endif
91 if (err) {
92 fuse_request_free(ff->release_req);
93 kfree(ff);
95 else {
96 ff->fh = outarg.fh;
97 file->private_data = ff;
98 INIT_LIST_HEAD(&ff->ff_list);
101 out_put_request:
102 fuse_put_request(fc, req);
103 out:
104 up(&inode->i_sem);
105 return err;
108 void fuse_sync_inode(struct inode *inode)
110 #ifdef KERNEL_2_6
111 filemap_fdatawrite(inode->i_mapping);
112 filemap_fdatawait(inode->i_mapping);
113 #else
114 #ifndef NO_MM
115 filemap_fdatasync(inode->i_mapping);
116 filemap_fdatawait(inode->i_mapping);
117 #endif
118 #endif
121 static int fuse_release(struct inode *inode, struct file *file)
123 struct fuse_conn *fc = INO_FC(inode);
124 struct fuse_inode *fi = INO_FI(inode);
125 struct fuse_release_in *inarg;
126 struct fuse_file *ff = file->private_data;
127 struct fuse_req *req = ff->release_req;
129 down(&inode->i_sem);
130 if (file->f_mode & FMODE_WRITE)
131 fuse_sync_inode(inode);
133 if (!list_empty(&ff->ff_list)) {
134 down_write(&fi->write_sem);
135 list_del(&ff->ff_list);
136 up_write(&fi->write_sem);
139 inarg = &req->misc.release_in;
140 inarg->fh = ff->fh;
141 inarg->flags = file->f_flags & ~O_EXCL;
142 req->in.h.opcode = FUSE_RELEASE;
143 req->in.h.ino = inode->i_ino;
144 req->in.numargs = 1;
145 req->in.args[0].size = sizeof(struct fuse_release_in);
146 req->in.args[0].value = inarg;
147 request_send_nonint(fc, req);
148 fuse_put_request(fc, req);
149 kfree(ff);
150 up(&inode->i_sem);
152 /* Return value is ignored by VFS */
153 return 0;
156 static int fuse_flush(struct file *file)
158 struct inode *inode = file->f_dentry->d_inode;
159 struct fuse_conn *fc = INO_FC(inode);
160 struct fuse_file *ff = file->private_data;
161 struct fuse_req *req = ff->release_req;
162 struct fuse_flush_in inarg;
163 int err;
165 if (fc->no_flush)
166 return 0;
168 down(&inode->i_sem);
169 memset(&inarg, 0, sizeof(inarg));
170 inarg.fh = ff->fh;
171 req->in.h.opcode = FUSE_FLUSH;
172 req->in.h.ino = inode->i_ino;
173 req->in.numargs = 1;
174 req->in.args[0].size = sizeof(inarg);
175 req->in.args[0].value = &inarg;
176 request_send_nonint(fc, req);
177 err = req->out.h.error;
178 fuse_reset_request(req);
179 up(&inode->i_sem);
180 if (err == -ENOSYS) {
181 fc->no_flush = 1;
182 err = 0;
184 return err;
187 static int fuse_fsync(struct file *file, struct dentry *de, int datasync)
189 struct inode *inode = de->d_inode;
190 struct fuse_inode *fi = INO_FI(inode);
191 struct fuse_conn *fc = INO_FC(inode);
192 struct fuse_file *ff = file->private_data;
193 struct fuse_req *req;
194 struct fuse_fsync_in inarg;
195 int err;
197 if (fc->no_fsync)
198 return 0;
200 req = fuse_get_request(fc);
201 if (!req)
202 return -ERESTARTSYS;
204 /* Make sure all writes to this inode are completed before
205 issuing the FSYNC request */
206 down_write(&fi->write_sem);
207 up_write(&fi->write_sem);
209 memset(&inarg, 0, sizeof(inarg));
210 inarg.fh = ff->fh;
211 inarg.datasync = datasync;
212 req->in.h.opcode = FUSE_FSYNC;
213 req->in.h.ino = inode->i_ino;
214 req->in.numargs = 1;
215 req->in.args[0].size = sizeof(inarg);
216 req->in.args[0].value = &inarg;
217 request_send(fc, req);
218 err = req->out.h.error;
219 if (err == -ENOSYS) {
220 fc->no_fsync = 1;
221 err = 0;
223 fuse_put_request(fc, req);
224 return err;
227 static ssize_t fuse_send_read(struct file *file, struct inode *inode,
228 char *buf, loff_t pos, size_t count)
230 struct fuse_conn *fc = INO_FC(inode);
231 struct fuse_file *ff = file->private_data;
232 struct fuse_req *req;
233 struct fuse_read_in inarg;
234 ssize_t res;
236 req = fuse_get_request_nonint(fc);
237 memset(&inarg, 0, sizeof(inarg));
238 inarg.fh = ff->fh;
239 inarg.offset = pos;
240 inarg.size = count;
241 req->in.h.opcode = FUSE_READ;
242 req->in.h.ino = inode->i_ino;
243 req->in.numargs = 1;
244 req->in.args[0].size = sizeof(inarg);
245 req->in.args[0].value = &inarg;
246 req->out.argvar = 1;
247 req->out.numargs = 1;
248 req->out.args[0].size = count;
249 req->out.args[0].value = buf;
250 request_send(fc, req);
251 res = req->out.h.error;
252 if (!res)
253 res = req->out.args[0].size;
254 fuse_put_request(fc, req);
255 return res;
259 static int fuse_readpage(struct file *file, struct page *page)
261 struct inode *inode = page->mapping->host;
262 char *buffer;
263 ssize_t res;
264 loff_t pos;
266 pos = (loff_t) page->index << PAGE_CACHE_SHIFT;
267 buffer = kmap(page);
268 res = fuse_send_read(file, inode, buffer, pos, PAGE_CACHE_SIZE);
269 if (res >= 0) {
270 if (res < PAGE_CACHE_SIZE)
271 memset(buffer + res, 0, PAGE_CACHE_SIZE - res);
272 flush_dcache_page(page);
273 SetPageUptodate(page);
274 res = 0;
276 kunmap(page);
277 unlock_page(page);
278 return res;
281 #ifdef KERNEL_2_6
283 static int read_pages_copyout(struct fuse_req *req, const char *buf,
284 size_t nbytes)
286 unsigned i;
287 unsigned long base_index = req->pages[0]->index;
288 for (i = 0; i < req->num_pages; i++) {
289 struct page *page = req->pages[i];
290 unsigned long offset;
291 unsigned count;
292 char *tmpbuf;
293 int err;
295 offset = (page->index - base_index) * PAGE_CACHE_SIZE;
296 if (offset >= nbytes)
297 count = 0;
298 else if (offset + PAGE_CACHE_SIZE <= nbytes)
299 count = PAGE_CACHE_SIZE;
300 else
301 count = nbytes - offset;
303 tmpbuf = kmap(page);
304 err = 0;
305 if (count)
306 err = copy_from_user(tmpbuf, buf + offset, count);
307 if (count < PAGE_CACHE_SIZE)
308 memset(tmpbuf + count, 0, PAGE_CACHE_SIZE - count);
309 kunmap(page);
310 if (err)
311 return -EFAULT;
313 flush_dcache_page(page);
314 SetPageUptodate(page);
316 return 0;
319 static void read_pages_end(struct fuse_conn *fc, struct fuse_req *req)
321 unsigned i;
323 for (i = 0; i < req->num_pages; i++)
324 unlock_page(req->pages[i]);
326 fuse_put_request(fc, req);
329 static void fuse_send_readpages(struct fuse_req *req, struct file *file,
330 struct inode *inode)
332 struct fuse_conn *fc = INO_FC(inode);
333 struct fuse_file *ff = file->private_data;
334 struct fuse_read_in *inarg;
335 loff_t pos;
336 unsigned numpages;
338 pos = (loff_t) req->pages[0]->index << PAGE_CACHE_SHIFT;
339 /* Allow for holes between the pages */
340 numpages = req->pages[req->num_pages - 1]->index + 1
341 - req->pages[0]->index;
343 inarg = &req->misc.read_in;
344 inarg->fh = ff->fh;
345 inarg->offset = pos;
346 inarg->size = numpages * PAGE_CACHE_SIZE;
347 req->in.h.opcode = FUSE_READ;
348 req->in.h.ino = inode->i_ino;
349 req->in.numargs = 1;
350 req->in.args[0].size = sizeof(struct fuse_read_in);
351 req->in.args[0].value = inarg;
352 req->copy_out = read_pages_copyout;
353 request_send_nonblock(fc, req, read_pages_end, NULL);
356 struct fuse_readpages_data {
357 struct fuse_req *req;
358 struct file *file;
359 struct inode *inode;
362 static int fuse_readpages_fill(void *_data, struct page *page)
364 struct fuse_readpages_data *data = _data;
365 struct fuse_req *req = data->req;
366 struct inode *inode = data->inode;
367 struct fuse_conn *fc = INO_FC(inode);
369 if (req->num_pages &&
370 (req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
371 (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_read ||
372 req->pages[req->num_pages - 1]->index + 1 != page->index)) {
373 struct fuse_conn *fc = INO_FC(page->mapping->host);
374 fuse_send_readpages(req, data->file, inode);
375 data->req = req = fuse_get_request_nonint(fc);
377 req->pages[req->num_pages] = page;
378 req->num_pages ++;
379 return 0;
382 static int fuse_readpages(struct file *file, struct address_space *mapping,
383 struct list_head *pages, unsigned nr_pages)
385 struct inode *inode = mapping->host;
386 struct fuse_conn *fc = INO_FC(inode);
387 struct fuse_readpages_data data;
389 data.req = fuse_get_request_nonint(fc);
390 data.file = file;
391 data.inode = inode;
393 read_cache_pages(mapping, pages, fuse_readpages_fill, &data);
394 if (data.req->num_pages)
395 fuse_send_readpages(data.req, file, inode);
396 else
397 fuse_put_request(fc, data.req);
399 return 0;
401 #endif
403 #ifndef KERNEL_2_6
404 static int fuse_is_block_uptodate(struct inode *inode, size_t bl_index)
406 size_t index = bl_index << FUSE_BLOCK_PAGE_SHIFT;
407 size_t end_index = ((bl_index + 1) << FUSE_BLOCK_PAGE_SHIFT) - 1;
408 size_t file_end_index = i_size_read(inode) >> PAGE_CACHE_SHIFT;
410 if (end_index > file_end_index)
411 end_index = file_end_index;
413 for (; index <= end_index; index++) {
414 struct page *page = find_get_page(inode->i_mapping, index);
416 if (!page)
417 return 0;
419 if (!PageUptodate(page)) {
420 page_cache_release(page);
421 return 0;
424 page_cache_release(page);
427 return 1;
431 static int fuse_cache_block(struct inode *inode, char *bl_buf,
432 size_t bl_index)
434 size_t start_index = bl_index << FUSE_BLOCK_PAGE_SHIFT;
435 size_t end_index = ((bl_index + 1) << FUSE_BLOCK_PAGE_SHIFT) - 1;
436 size_t file_end_index = i_size_read(inode) >> PAGE_CACHE_SHIFT;
438 int i;
440 if (end_index > file_end_index)
441 end_index = file_end_index;
443 for (i = 0; start_index + i <= end_index; i++) {
444 size_t index = start_index + i;
445 struct page *page;
446 char *buffer;
448 page = grab_cache_page(inode->i_mapping, index);
449 if (!page)
450 return -1;
452 if (!PageUptodate(page)) {
453 buffer = kmap(page);
454 memcpy(buffer, bl_buf + i * PAGE_CACHE_SIZE,
455 PAGE_CACHE_SIZE);
456 flush_dcache_page(page);
457 SetPageUptodate(page);
458 kunmap(page);
461 unlock_page(page);
462 page_cache_release(page);
465 return 0;
468 static int fuse_file_read_block(struct file *file, struct inode *inode,
469 char *bl_buf, size_t bl_index)
471 ssize_t res;
472 loff_t offset;
474 offset = (loff_t) bl_index << FUSE_BLOCK_SHIFT;
475 res = fuse_send_read(file, inode, bl_buf, offset, FUSE_BLOCK_SIZE);
476 if (res >= 0) {
477 if (res < FUSE_BLOCK_SIZE)
478 memset(bl_buf + res, 0, FUSE_BLOCK_SIZE - res);
479 res = 0;
481 return res;
484 static void fuse_file_bigread(struct file *file, struct inode *inode,
485 loff_t pos, size_t count)
487 size_t bl_index = pos >> FUSE_BLOCK_SHIFT;
488 size_t bl_end_index = (pos + count) >> FUSE_BLOCK_SHIFT;
489 size_t bl_file_end_index = i_size_read(inode) >> FUSE_BLOCK_SHIFT;
491 if (bl_end_index > bl_file_end_index)
492 bl_end_index = bl_file_end_index;
494 while (bl_index <= bl_end_index) {
495 int res;
496 char *bl_buf = kmalloc(FUSE_BLOCK_SIZE, GFP_KERNEL);
497 if (!bl_buf)
498 break;
499 res = fuse_is_block_uptodate(inode, bl_index);
500 if (!res)
501 res = fuse_file_read_block(file, inode, bl_buf,
502 bl_index);
503 if (!res)
504 fuse_cache_block(inode, bl_buf, bl_index);
505 kfree(bl_buf);
506 bl_index++;
509 #endif
511 static ssize_t fuse_read(struct file *file, char *buf, size_t count,
512 loff_t *ppos)
514 struct inode *inode = file->f_dentry->d_inode;
515 struct fuse_conn *fc = INO_FC(inode);
516 char *tmpbuf;
517 ssize_t res = 0;
518 loff_t pos = *ppos;
519 unsigned int max_read = count < fc->max_read ? count : fc->max_read;
521 do {
522 tmpbuf = kmalloc(max_read, GFP_KERNEL);
523 if (tmpbuf)
524 break;
526 max_read /= 2;
527 } while (max_read > PAGE_CACHE_SIZE / 4);
528 if (!tmpbuf)
529 return -ENOMEM;
531 while (count) {
532 size_t nbytes = count < max_read ? count : max_read;
533 ssize_t res1;
534 res1 = fuse_send_read(file, inode, tmpbuf, pos, nbytes);
535 if (res1 < 0) {
536 if (!res)
537 res = res1;
538 break;
540 res += res1;
541 if (copy_to_user(buf, tmpbuf, res1)) {
542 res = -EFAULT;
543 break;
545 count -= res1;
546 buf += res1;
547 pos += res1;
548 if (res1 < nbytes)
549 break;
551 kfree(tmpbuf);
553 if (res > 0)
554 *ppos += res;
556 return res;
559 static ssize_t fuse_file_read(struct file *file, char *buf,
560 size_t count, loff_t * ppos)
562 struct inode *inode = file->f_dentry->d_inode;
563 struct fuse_conn *fc = INO_FC(inode);
564 ssize_t res;
566 if (fc->flags & FUSE_DIRECT_IO) {
567 res = fuse_read(file, buf, count, ppos);
569 else {
570 #ifndef KERNEL_2_6
571 if (fc->flags & FUSE_LARGE_READ) {
572 down(&inode->i_sem);
573 fuse_file_bigread(file, inode, *ppos, count);
574 up(&inode->i_sem);
576 #endif
577 res = generic_file_read(file, buf, count, ppos);
580 return res;
583 static ssize_t fuse_send_write(struct fuse_req *req, int writepage,
584 struct fuse_file *ff, struct inode *inode,
585 const char *buf, loff_t pos, size_t count)
587 struct fuse_conn *fc = INO_FC(inode);
588 struct fuse_write_in inarg;
589 struct fuse_write_out outarg;
590 ssize_t res;
592 memset(&inarg, 0, sizeof(inarg));
593 inarg.writepage = writepage;
594 inarg.fh = ff->fh;
595 inarg.offset = pos;
596 inarg.size = count;
597 req->in.h.opcode = FUSE_WRITE;
598 req->in.h.ino = inode->i_ino;
599 if (writepage) {
600 req->in.h.uid = 0;
601 req->in.h.gid = 0;
602 req->in.h.pid = 0;
604 req->in.numargs = 2;
605 req->in.args[0].size = sizeof(inarg);
606 req->in.args[0].value = &inarg;
607 req->in.args[1].size = count;
608 req->in.args[1].value = buf;
609 req->out.numargs = 1;
610 req->out.args[0].size = sizeof(outarg);
611 req->out.args[0].value = &outarg;
612 request_send(fc, req);
613 res = req->out.h.error;
614 if (!res) {
615 if (outarg.size > count)
616 return -EPROTO;
617 else
618 return outarg.size;
620 else
621 return res;
624 static int write_buffer(struct inode *inode, struct file *file,
625 struct page *page, unsigned offset, size_t count)
627 struct fuse_conn *fc = INO_FC(inode);
628 struct fuse_file *ff = file->private_data;
629 char *buffer;
630 ssize_t res;
631 loff_t pos;
632 struct fuse_req *req;
634 req = fuse_get_request(fc);
635 if (!req)
636 return -ERESTARTSYS;
638 pos = ((loff_t) page->index << PAGE_CACHE_SHIFT) + offset;
639 buffer = kmap(page);
640 res = fuse_send_write(req, 0, ff, inode, buffer + offset, pos, count);
641 fuse_put_request(fc, req);
642 if (res >= 0) {
643 if (res < count) {
644 printk("fuse: short write\n");
645 res = -EPROTO;
646 } else
647 res = 0;
649 kunmap(page);
650 if (res)
651 SetPageError(page);
652 return res;
655 static int get_write_count(struct inode *inode, struct page *page)
657 unsigned long end_index;
658 loff_t size = i_size_read(inode);
659 int count;
661 end_index = size >> PAGE_CACHE_SHIFT;
662 if (page->index < end_index)
663 count = PAGE_CACHE_SIZE;
664 else {
665 count = size & (PAGE_CACHE_SIZE - 1);
666 if (page->index > end_index || count == 0)
667 return 0;
669 return count;
673 static int write_page_block(struct inode *inode, struct page *page)
675 struct fuse_conn *fc = INO_FC(inode);
676 struct fuse_inode *fi = INO_FI(inode);
677 char *buffer;
678 ssize_t res;
679 loff_t pos;
680 unsigned count;
681 struct fuse_req *req;
683 req = fuse_get_request(fc);
684 if (!req)
685 return -ERESTARTSYS;
687 down_read(&fi->write_sem);
688 count = get_write_count(inode, page);
689 res = 0;
690 if (count) {
691 struct fuse_file *ff;
692 BUG_ON(list_empty(&fi->write_files));
693 ff = list_entry(fi->write_files.next, struct fuse_file, ff_list);
694 pos = ((loff_t) page->index << PAGE_CACHE_SHIFT);
695 buffer = kmap(page);
696 res = fuse_send_write(req, 1, ff, inode, buffer, pos, count);
697 if (res >= 0) {
698 if (res < count) {
699 printk("fuse: short write\n");
700 res = -EPROTO;
701 } else
702 res = 0;
705 up_read(&fi->write_sem);
706 fuse_put_request(fc, req);
707 kunmap(page);
708 if (res)
709 SetPageError(page);
710 return res;
714 #ifdef KERNEL_2_6
717 static void write_page_nonblock_end(struct fuse_conn *fc, struct fuse_req *req)
719 struct page *page = (struct page *) req->data;
720 struct inode *inode = page->mapping->host;
721 struct fuse_inode *fi = INO_FI(inode);
722 struct fuse_write_out *outarg = req->out.args[0].value;
723 if (!req->out.h.error && outarg->size != req->in.args[1].size) {
724 printk("fuse: short write\n");
725 req->out.h.error = -EPROTO;
728 if (req->out.h.error) {
729 SetPageError(page);
730 if (req->out.h.error == -ENOSPC)
731 set_bit(AS_ENOSPC, &page->mapping->flags);
732 else
733 set_bit(AS_EIO, &page->mapping->flags);
735 up_read(&fi->write_sem);
737 end_page_writeback(page);
738 kunmap(page);
739 fuse_put_request(fc, req);
742 static void send_write_nonblock(struct fuse_req *req, struct inode *inode,
743 struct page *page, unsigned count)
745 struct fuse_conn *fc = INO_FC(inode);
746 struct fuse_inode *fi = INO_FI(inode);
747 struct fuse_write_in *inarg;
748 struct fuse_file *ff;
749 char *buffer;
751 BUG_ON(list_empty(&fi->write_files));
752 ff = list_entry(fi->write_files.next, struct fuse_file, ff_list);
754 inarg = &req->misc.write.in;
755 buffer = kmap(page);
756 inarg->writepage = 1;
757 inarg->fh = ff->fh;
758 inarg->offset = ((loff_t) page->index << PAGE_CACHE_SHIFT);
759 inarg->size = count;
760 req->in.h.opcode = FUSE_WRITE;
761 req->in.h.ino = inode->i_ino;
762 req->in.h.uid = 0;
763 req->in.h.gid = 0;
764 req->in.h.pid = 0;
765 req->in.numargs = 2;
766 req->in.args[0].size = sizeof(struct fuse_write_in);
767 req->in.args[0].value = inarg;
768 req->in.args[1].size = count;
769 req->in.args[1].value = buffer;
770 req->out.numargs = 1;
771 req->out.args[0].size = sizeof(struct fuse_write_out);
772 req->out.args[0].value = &req->misc.write.out;
773 request_send_nonblock(fc, req, write_page_nonblock_end, page);
776 static int write_page_nonblock(struct inode *inode, struct page *page)
778 struct fuse_conn *fc = INO_FC(inode);
779 struct fuse_inode *fi = INO_FI(inode);
780 struct fuse_req *req;
781 int err;
783 err = -EWOULDBLOCK;
784 req = fuse_get_request_nonblock(fc);
785 if (req) {
786 if (down_read_trylock(&fi->write_sem)) {
787 unsigned count;
788 err = 0;
789 count = get_write_count(inode, page);
790 if (count) {
791 SetPageWriteback(page);
792 send_write_nonblock(req, inode, page, count);
793 return 0;
795 up_read(&fi->write_sem);
797 fuse_put_request(fc, req);
799 return err;
802 static int fuse_writepage(struct page *page, struct writeback_control *wbc)
804 int err;
805 struct inode *inode = page->mapping->host;
807 if (wbc->nonblocking) {
808 err = write_page_nonblock(inode, page);
809 if (err == -EWOULDBLOCK) {
810 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,6)
811 redirty_page_for_writepage(wbc, page);
812 #else
813 __set_page_dirty_nobuffers(page);
814 #endif
815 err = 0;
817 } else
818 err = write_page_block(inode, page);
820 unlock_page(page);
821 return err;
823 #else
824 static int fuse_writepage(struct page *page)
826 int err = write_page_block(page->mapping->host, page);
827 unlock_page(page);
828 return err;
830 #endif
832 static int fuse_prepare_write(struct file *file, struct page *page,
833 unsigned offset, unsigned to)
835 /* No op */
836 return 0;
839 static int fuse_commit_write(struct file *file, struct page *page,
840 unsigned offset, unsigned to)
842 int err;
843 struct inode *inode = page->mapping->host;
845 err = write_buffer(inode, file, page, offset, to - offset);
846 if (!err) {
847 loff_t pos = (page->index << PAGE_CACHE_SHIFT) + to;
848 if (pos > i_size_read(inode))
849 i_size_write(inode, pos);
851 if (offset == 0 && to == PAGE_CACHE_SIZE) {
852 #ifdef KERNEL_2_6
853 clear_page_dirty(page);
854 #else
855 ClearPageDirty(page);
856 #endif
857 SetPageUptodate(page);
861 return err;
864 static ssize_t fuse_write(struct file *file, const char *buf, size_t count,
865 loff_t *ppos)
867 struct inode *inode = file->f_dentry->d_inode;
868 struct fuse_conn *fc = INO_FC(inode);
869 struct fuse_file *ff = file->private_data;
870 char *tmpbuf;
871 ssize_t res = 0;
872 loff_t pos = *ppos;
873 struct fuse_req *req;
875 req = fuse_get_request(fc);
876 if (!req)
877 return -ERESTARTSYS;
879 tmpbuf = kmalloc(count < fc->max_write ? count : fc->max_write,
880 GFP_KERNEL);
881 if (!tmpbuf) {
882 fuse_put_request(fc, req);
883 return -ENOMEM;
886 while (count) {
887 size_t nbytes = count < fc->max_write ? count : fc->max_write;
888 ssize_t res1;
889 if (copy_from_user(tmpbuf, buf, nbytes)) {
890 res = -EFAULT;
891 break;
893 res1 = fuse_send_write(req, 0, ff, inode, tmpbuf, pos, nbytes);
894 if (res1 < 0) {
895 res = res1;
896 break;
898 res += res1;
899 count -= res1;
900 buf += res1;
901 pos += res1;
902 if (res1 < nbytes)
903 break;
905 if (count)
906 fuse_reset_request(req);
908 kfree(tmpbuf);
909 fuse_put_request(fc, req);
911 if (res > 0) {
912 if (pos > i_size_read(inode))
913 i_size_write(inode, pos);
914 *ppos = pos;
917 return res;
920 static ssize_t fuse_file_write(struct file *file, const char *buf,
921 size_t count, loff_t *ppos)
923 struct inode *inode = file->f_dentry->d_inode;
924 struct fuse_conn *fc = INO_FC(inode);
926 if (fc->flags & FUSE_DIRECT_IO) {
927 ssize_t res;
928 down(&inode->i_sem);
929 res = fuse_write(file, buf, count, ppos);
930 up(&inode->i_sem);
931 return res;
933 else
934 return generic_file_write(file, buf, count, ppos);
937 static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
939 struct inode *inode = file->f_dentry->d_inode;
940 struct fuse_conn *fc = INO_FC(inode);
942 if (fc->flags & FUSE_DIRECT_IO)
943 return -ENODEV;
944 else {
945 if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) ==
946 (VM_WRITE | VM_SHARED)) {
947 struct fuse_inode *fi = INO_FI(inode);
948 struct fuse_file *ff = file->private_data;
950 if (!user_mmap && current->uid != 0)
951 return -EPERM;
953 down_write(&fi->write_sem);
954 if (list_empty(&ff->ff_list))
955 list_add(&ff->ff_list, &fi->write_files);
956 up_write(&fi->write_sem);
958 return generic_file_mmap(file, vma);
962 static struct file_operations fuse_file_operations = {
963 .read = fuse_file_read,
964 .write = fuse_file_write,
965 .mmap = fuse_file_mmap,
966 .open = fuse_open,
967 .flush = fuse_flush,
968 .release = fuse_release,
969 .fsync = fuse_fsync,
970 #ifdef KERNEL_2_6
971 .sendfile = generic_file_sendfile,
972 #endif
975 static struct address_space_operations fuse_file_aops = {
976 .readpage = fuse_readpage,
977 .writepage = fuse_writepage,
978 .prepare_write = fuse_prepare_write,
979 .commit_write = fuse_commit_write,
980 #ifdef KERNEL_2_6
981 .readpages = fuse_readpages,
982 .set_page_dirty = __set_page_dirty_nobuffers,
983 #endif
986 void fuse_init_file_inode(struct inode *inode)
988 inode->i_fop = &fuse_file_operations;
989 inode->i_data.a_ops = &fuse_file_aops;
/*
 * Local Variables:
 * indent-tabs-mode: t
 * c-basic-offset: 8
 * End:
 */