/*
 * Copyright (C) 2005, 2006
 * Avishay Traeger (avishay@gmail.com)
 * Copyright (C) 2008, 2009
 * Boaz Harrosh <ooo@electrozaur.com>
 *
 * Copyrights for code taken from ext2:
 *     Copyright (C) 1992, 1993, 1994, 1995
 *     Remy Card (card@masi.ibp.fr)
 *     Laboratoire MASI - Institut Blaise Pascal
 *     Universite Pierre et Marie Curie (Paris VI)
 *     linux/fs/minix/inode.c
 *     Copyright (C) 1991, 1992  Linus Torvalds
 *
 * This file is part of exofs.
 *
 * exofs is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.  Since it is based on ext2, and the only
 * valid version of GPL for the Linux kernel is version 2, the only valid
 * version of GPL for exofs is version 2.
 *
 * exofs is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with exofs; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include <linux/slab.h>

#define EXOFS_DBGMSG2(M...) do {} while (0)
unsigned exofs_max_io_pages(struct ore_layout *layout,
			    unsigned expected_pages)
{
	unsigned pages = min_t(unsigned, expected_pages,
			       layout->max_io_length / PAGE_SIZE);

	return pages;
}
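/*
 * Illustrative numbers only (not taken from any particular layout): with a
 * max_io_length of 1 MiB and 4 KiB pages a single IO is capped at 256 pages,
 * so exofs_max_io_pages(layout, 1024) would yield 256, while
 * exofs_max_io_pages(layout, 64) simply yields 64.
 */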
struct page_collect {
	struct exofs_sb_info *sbi;
	struct inode *inode;
	unsigned expected_pages;
	struct ore_io_state *ios;

	struct page **pages;
	unsigned alloc_pages;
	unsigned nr_pages;
	unsigned long length;
	loff_t pg_first; /* keep 64bit also in 32-arches */
	bool read_4_write; /* This means two things: that the read is sync
			    * And the pages should not be unlocked.
			    */
	struct page *that_locked_page;
};
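/*
 * Rough sketch of how a page_collect is driven in this file (the order is
 * inferred from the helpers below, it is not a quote of any single caller):
 *
 *	struct page_collect pcol;
 *
 *	_pcol_init(&pcol, expected_pages, inode);
 *	pcol_try_alloc(&pcol);			 // allocate pcol.pages[]
 *	pcol_add_page(&pcol, page, len);	 // repeat for contiguous pages
 *	read_exec(&pcol) or write_exec(&pcol);	 // submit; _pcol_reset() for the next run
 */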
static void _pcol_init(struct page_collect *pcol, unsigned expected_pages,
		       struct inode *inode)
{
	struct exofs_sb_info *sbi = inode->i_sb->s_fs_info;

	pcol->sbi = sbi;
	pcol->inode = inode;
	pcol->expected_pages = expected_pages;

	pcol->ios = NULL;
	pcol->pages = NULL;
	pcol->alloc_pages = 0;
	pcol->nr_pages = 0;
	pcol->length = 0;
	pcol->pg_first = -1;
	pcol->read_4_write = false;
	pcol->that_locked_page = NULL;
}
static void _pcol_reset(struct page_collect *pcol)
{
	pcol->expected_pages -= min(pcol->nr_pages, pcol->expected_pages);

	pcol->pages = NULL;
	pcol->alloc_pages = 0;
	pcol->nr_pages = 0;
	pcol->length = 0;
	pcol->pg_first = -1;
	pcol->ios = NULL;
	pcol->that_locked_page = NULL;

	/* this is probably the end of the loop but in writes
	 * it might not end here. don't be left with nothing
	 */
	if (!pcol->expected_pages)
		pcol->expected_pages =
			exofs_max_io_pages(&pcol->sbi->layout, ~0);
}
static int pcol_try_alloc(struct page_collect *pcol)
{
	unsigned pages;

	/* TODO: easily support bio chaining */
	pages = exofs_max_io_pages(&pcol->sbi->layout, pcol->expected_pages);

	for (; pages; pages >>= 1) {
		pcol->pages = kmalloc(pages * sizeof(struct page *),
				      GFP_KERNEL);
		if (likely(pcol->pages)) {
			pcol->alloc_pages = pages;
			return 0;
		}
	}

	EXOFS_ERR("Failed to kmalloc expected_pages=%u\n",
		  pcol->expected_pages);
	return -ENOMEM;
}
static void pcol_free(struct page_collect *pcol)
{
	kfree(pcol->pages);
	pcol->pages = NULL;

	if (pcol->ios) {
		ore_put_io_state(pcol->ios);
		pcol->ios = NULL;
	}
}
static int pcol_add_page(struct page_collect *pcol, struct page *page,
			 unsigned len)
{
	if (unlikely(pcol->nr_pages >= pcol->alloc_pages))
		return -ENOMEM;

	pcol->pages[pcol->nr_pages++] = page;
	pcol->length += len;
	return 0;
}
enum {PAGE_WAS_NOT_IN_IO = 17};
static int update_read_page(struct page *page, int ret)
{
	switch (ret) {
	case 0:
		/* Everything is OK */
		SetPageUptodate(page);
		if (PageError(page))
			ClearPageError(page);
		break;
	case -EFAULT:
		/* In this case we were trying to read something that wasn't on
		 * disk yet - return a page full of zeroes. This should be OK,
		 * because the object should be empty (if there was a write
		 * before this read, the read would be waiting with the page
		 * locked).
		 */
		clear_highpage(page);

		SetPageUptodate(page);
		if (PageError(page))
			ClearPageError(page);
		EXOFS_DBGMSG("recovered read error\n");
		/* fall through */
	case PAGE_WAS_NOT_IN_IO:
		ret = 0; /* recovered error */
		break;
	default:
		SetPageError(page);
	}
	return ret;
}
static void update_write_page(struct page *page, int ret)
{
	if (unlikely(ret == PAGE_WAS_NOT_IN_IO))
		return; /* don't pass start don't collect $200 */

	if (ret)
		mapping_set_error(page->mapping, ret);

	end_page_writeback(page);
}
/* Called at the end of reads, to optionally unlock pages and update their
 * status.
 */
static int __readpages_done(struct page_collect *pcol)
{
	int i;
	u64 good_bytes;
	u64 length = 0;
	int ret = ore_check_io(pcol->ios, NULL);

	if (likely(!ret)) {
		good_bytes = pcol->length;
		ret = PAGE_WAS_NOT_IN_IO;
	} else {
		good_bytes = 0;
	}

	EXOFS_DBGMSG2("readpages_done(0x%lx) good_bytes=0x%llx"
		     " length=0x%lx nr_pages=%u\n",
		     pcol->inode->i_ino, _LLU(good_bytes), pcol->length,
		     pcol->nr_pages);

	for (i = 0; i < pcol->nr_pages; i++) {
		struct page *page = pcol->pages[i];
		struct inode *inode = page->mapping->host;
		int page_stat;

		if (inode != pcol->inode)
			continue; /* osd might add more pages at end */

		if (likely(length < good_bytes))
			page_stat = 0;
		else
			page_stat = ret;

		EXOFS_DBGMSG2("    readpages_done(0x%lx, 0x%lx) %s\n",
			      inode->i_ino, page->index,
			      page_stat ? "bad_bytes" : "good_bytes");

		ret = update_read_page(page, page_stat);
		if (!pcol->read_4_write)
			unlock_page(page);
		length += PAGE_SIZE;
	}

	pcol_free(pcol);
	EXOFS_DBGMSG2("readpages_done END\n");
	return ret;
}
/* callback of async reads */
static void readpages_done(struct ore_io_state *ios, void *p)
{
	struct page_collect *pcol = p;

	__readpages_done(pcol);
	atomic_dec(&pcol->sbi->s_curr_pending);
	kfree(pcol);
}
static void _unlock_pcol_pages(struct page_collect *pcol, int ret, int rw)
{
	int i;

	for (i = 0; i < pcol->nr_pages; i++) {
		struct page *page = pcol->pages[i];

		if (rw == READ)
			update_read_page(page, ret);
		else
			update_write_page(page, ret);

		unlock_page(page);
	}
}
static int _maybe_not_all_in_one_io(struct ore_io_state *ios,
	struct page_collect *pcol_src, struct page_collect *pcol)
{
	/* length was wrong or offset was not page aligned */
	BUG_ON(pcol_src->nr_pages < ios->nr_pages);

	if (pcol_src->nr_pages > ios->nr_pages) {
		struct page **src_page;
		unsigned pages_less = pcol_src->nr_pages - ios->nr_pages;
		unsigned long len_less = pcol_src->length - ios->length;
		unsigned i;
		int ret;

		/* This IO was trimmed */
		pcol_src->nr_pages = ios->nr_pages;
		pcol_src->length = ios->length;

		/* Left over pages are passed to the next io */
		pcol->expected_pages += pages_less;
		pcol->nr_pages = pages_less;
		pcol->length = len_less;
		src_page = pcol_src->pages + pcol_src->nr_pages;
		pcol->pg_first = (*src_page)->index;

		ret = pcol_try_alloc(pcol);
		if (unlikely(ret))
			return ret;

		for (i = 0; i < pages_less; ++i)
			pcol->pages[i] = *src_page++;

		EXOFS_DBGMSG("Length was adjusted nr_pages=0x%x "
			"pages_less=0x%x expected_pages=0x%x "
			"next_offset=0x%llx next_len=0x%lx\n",
			pcol_src->nr_pages, pages_less, pcol->expected_pages,
			pcol->pg_first * PAGE_SIZE, pcol->length);
	}
	return 0;
}
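/*
 * Worked example (the numbers are illustrative, not from a real trace): if a
 * pcol held 16 pages but the ORE only fit 12 into this io_state, pages_less
 * is 4.  The submitted pcol_src is trimmed to 12 pages, and the 4 left-over
 * pages plus their byte length seed the next pcol, so the following IO
 * starts exactly where this one was cut.
 */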
static int read_exec(struct page_collect *pcol)
{
	struct exofs_i_info *oi = exofs_i(pcol->inode);
	struct ore_io_state *ios;
	struct page_collect *pcol_copy = NULL;
	int ret;

	if (!pcol->ios) {
		int ret = ore_get_rw_state(&pcol->sbi->layout, &oi->oc, true,
					   pcol->pg_first << PAGE_CACHE_SHIFT,
					   pcol->length, &pcol->ios);
		if (ret)
			return ret;
	}

	ios = pcol->ios;
	ios->pages = pcol->pages;

	if (pcol->read_4_write) {
		ore_read(pcol->ios);
		return __readpages_done(pcol);
	}

	pcol_copy = kmalloc(sizeof(*pcol_copy), GFP_KERNEL);
	if (!pcol_copy) {
		ret = -ENOMEM;
		goto err;
	}

	*pcol_copy = *pcol;
	ios->done = readpages_done;
	ios->private = pcol_copy;

	/* pages ownership was passed to pcol_copy */
	_pcol_reset(pcol);

	ret = _maybe_not_all_in_one_io(ios, pcol_copy, pcol);
	if (unlikely(ret))
		goto err;

	EXOFS_DBGMSG2("read_exec(0x%lx) offset=0x%llx length=0x%llx\n",
		pcol->inode->i_ino, _LLU(ios->offset), _LLU(ios->length));

	ret = ore_read(ios);
	if (unlikely(ret))
		goto err;

	atomic_inc(&pcol->sbi->s_curr_pending);
	return 0;

err:
	if (!pcol_copy) /* Failed before ownership transfer */
		pcol_copy = pcol;
	_unlock_pcol_pages(pcol_copy, ret, READ);
	pcol_free(pcol_copy);

	return ret;
}
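/*
 * How the asynchronous read completes (inferred from the code above):
 * read_exec() hands the collected pages to a kmalloc'ed pcol_copy and bumps
 * sbi->s_curr_pending; when the ORE finishes, readpages_done() runs,
 * __readpages_done() marks and unlocks each page, and s_curr_pending is
 * dropped again.  The on-stack pcol is reset right away so the caller can
 * keep collecting the next run of pages.
 */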
/* readpage_strip is called either directly from readpage() or by the VFS from
 * within read_cache_pages(), to add one more page to be read. It will try to
 * collect as many contiguous pages as possible. If a discontinuity is
 * encountered, or it runs out of resources, it will submit the previous
 * segment and will start a new collection. Eventually the caller must submit
 * the last segment if present.
 */
static int readpage_strip(void *data, struct page *page)
{
	struct page_collect *pcol = data;
	struct inode *inode = pcol->inode;
	struct exofs_i_info *oi = exofs_i(inode);
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	size_t len;
	int ret;

	BUG_ON(!PageLocked(page));

	/* FIXME: Just for debugging, will be removed */
	if (PageUptodate(page))
		EXOFS_ERR("PageUptodate(0x%lx, 0x%lx)\n", pcol->inode->i_ino,
			  page->index);

	pcol->that_locked_page = page;

	if (page->index < end_index)
		len = PAGE_CACHE_SIZE;
	else if (page->index == end_index)
		len = i_size & ~PAGE_CACHE_MASK;
	else
		len = 0;

	if (!len || !obj_created(oi)) {
		/* this will be out of bounds, or doesn't exist yet.
		 * Current page is cleared and the request is split
		 */
		clear_highpage(page);

		SetPageUptodate(page);
		if (PageError(page))
			ClearPageError(page);

		if (!pcol->read_4_write)
			unlock_page(page);
		EXOFS_DBGMSG("readpage_strip(0x%lx) empty page len=%zx "
			     "read_4_write=%d index=0x%lx end_index=0x%lx "
			     "splitting\n", inode->i_ino, len,
			     pcol->read_4_write, page->index, end_index);

		return read_exec(pcol);
	}

try_again:

	if (unlikely(pcol->pg_first == -1)) {
		pcol->pg_first = page->index;
	} else if (unlikely((pcol->pg_first + pcol->nr_pages) !=
		   page->index)) {
		/* Discontinuity detected, split the request */
		ret = read_exec(pcol);
		if (unlikely(ret))
			goto fail;
		goto try_again;
	}

	if (!pcol->pages) {
		ret = pcol_try_alloc(pcol);
		if (unlikely(ret))
			goto fail;
	}

	if (len != PAGE_CACHE_SIZE)
		zero_user(page, len, PAGE_CACHE_SIZE - len);

	EXOFS_DBGMSG2("    readpage_strip(0x%lx, 0x%lx) len=0x%zx\n",
		      inode->i_ino, page->index, len);

	ret = pcol_add_page(pcol, page, len);
	if (ret) {
		EXOFS_DBGMSG2("Failed pcol_add_page pages[i]=%p "
			      "this_len=0x%zx nr_pages=%u length=0x%lx\n",
			      page, len, pcol->nr_pages, pcol->length);

		/* split the request, and start again with current page */
		ret = read_exec(pcol);
		if (unlikely(ret))
			goto fail;

		goto try_again;
	}

	return 0;

fail:
	/* SetPageError(page); ??? */
	unlock_page(page);
	return ret;
}
static int exofs_readpages(struct file *file, struct address_space *mapping,
			   struct list_head *pages, unsigned nr_pages)
{
	struct page_collect pcol;
	int ret;

	_pcol_init(&pcol, nr_pages, mapping->host);

	ret = read_cache_pages(mapping, pages, readpage_strip, &pcol);
	if (ret) {
		EXOFS_ERR("read_cache_pages => %d\n", ret);
		return ret;
	}

	ret = read_exec(&pcol);
	if (unlikely(ret))
		return ret;

	return read_exec(&pcol);
}
static int _readpage(struct page *page, bool read_4_write)
{
	struct page_collect pcol;
	int ret;

	_pcol_init(&pcol, 1, page->mapping->host);

	pcol.read_4_write = read_4_write;
	ret = readpage_strip(&pcol, page);
	if (ret) {
		EXOFS_ERR("_readpage => %d\n", ret);
		return ret;
	}

	return read_exec(&pcol);
}
/*
 * We don't need the file
 */
static int exofs_readpage(struct file *file, struct page *page)
{
	return _readpage(page, false);
}
/* Callback for osd_write. All writes are asynchronous */
static void writepages_done(struct ore_io_state *ios, void *p)
{
	struct page_collect *pcol = p;
	int i;
	u64 good_bytes;
	u64 length = 0;
	int ret = ore_check_io(ios, NULL);

	atomic_dec(&pcol->sbi->s_curr_pending);

	if (likely(!ret)) {
		good_bytes = pcol->length;
		ret = PAGE_WAS_NOT_IN_IO;
	} else {
		good_bytes = 0;
	}

	EXOFS_DBGMSG2("writepages_done(0x%lx) good_bytes=0x%llx"
		     " length=0x%lx nr_pages=%u\n",
		     pcol->inode->i_ino, _LLU(good_bytes), pcol->length,
		     pcol->nr_pages);

	for (i = 0; i < pcol->nr_pages; i++) {
		struct page *page = pcol->pages[i];
		struct inode *inode = page->mapping->host;
		int page_stat;

		if (inode != pcol->inode)
			continue; /* osd might add more pages to a bio */

		if (likely(length < good_bytes))
			page_stat = 0;
		else
			page_stat = ret;

		update_write_page(page, page_stat);
		unlock_page(page);
		EXOFS_DBGMSG2("    writepages_done(0x%lx, 0x%lx) status=%d\n",
			      inode->i_ino, page->index, page_stat);

		length += PAGE_SIZE;
	}

	pcol_free(pcol);
	kfree(pcol);
	EXOFS_DBGMSG2("writepages_done END\n");
}
static struct page *__r4w_get_page(void *priv, u64 offset, bool *uptodate)
{
	struct page_collect *pcol = priv;
	pgoff_t index = offset / PAGE_SIZE;

	if (!pcol->that_locked_page ||
	    (pcol->that_locked_page->index != index)) {
		struct page *page;
		loff_t i_size = i_size_read(pcol->inode);

		if (offset >= i_size) {
			*uptodate = true;
			EXOFS_DBGMSG2("offset >= i_size index=0x%lx\n", index);
			return ZERO_PAGE(0);
		}

		page = find_get_page(pcol->inode->i_mapping, index);
		if (!page) {
			page = find_or_create_page(pcol->inode->i_mapping,
						   index, GFP_NOFS);
			if (unlikely(!page)) {
				EXOFS_DBGMSG("grab_cache_page Failed "
					     "index=0x%llx\n", _LLU(index));
				return NULL;
			}
			unlock_page(page);
		}
		*uptodate = PageUptodate(page);
		EXOFS_DBGMSG2("index=0x%lx uptodate=%d\n", index, *uptodate);
		return page;
	} else {
		EXOFS_DBGMSG2("YES that_locked_page index=0x%lx\n",
			      pcol->that_locked_page->index);
		*uptodate = true;
		return pcol->that_locked_page;
	}
}
static void __r4w_put_page(void *priv, struct page *page)
{
	struct page_collect *pcol = priv;

	if ((pcol->that_locked_page != page) && (ZERO_PAGE(0) != page)) {
		EXOFS_DBGMSG2("index=0x%lx\n", page->index);
		page_cache_release(page);
		return;
	}
	EXOFS_DBGMSG2("that_locked_page index=0x%lx\n",
		      ZERO_PAGE(0) == page ? -1 : page->index);
}
static const struct _ore_r4w_op _r4w_op = {
	.get_page = &__r4w_get_page,
	.put_page = &__r4w_put_page,
};
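/*
 * My reading of the two helpers above (not a quote from the ORE
 * documentation): when a write does not cover whole stripes, the ORE uses
 * these callbacks to fetch the pages it still needs for its internal
 * read-modify-write pass.  get_page() hands back the already-locked page
 * being written, a page looked up in the page cache, or ZERO_PAGE(0) for
 * offsets past i_size; put_page() drops whatever reference was taken.
 */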
static int write_exec(struct page_collect *pcol)
{
	struct exofs_i_info *oi = exofs_i(pcol->inode);
	struct ore_io_state *ios;
	struct page_collect *pcol_copy = NULL;
	int ret;

	ret = ore_get_rw_state(&pcol->sbi->layout, &oi->oc, false,
			       pcol->pg_first << PAGE_CACHE_SHIFT,
			       pcol->length, &pcol->ios);
	if (unlikely(ret))
		goto err;

	pcol_copy = kmalloc(sizeof(*pcol_copy), GFP_KERNEL);
	if (!pcol_copy) {
		EXOFS_ERR("write_exec: Failed to kmalloc(pcol)\n");
		ret = -ENOMEM;
		goto err;
	}

	*pcol_copy = *pcol;

	ios = pcol->ios;
	ios->pages = pcol_copy->pages;
	ios->done = writepages_done;
	ios->r4w = &_r4w_op;
	ios->private = pcol_copy;

	/* pages ownership was passed to pcol_copy */
	_pcol_reset(pcol);

	ret = _maybe_not_all_in_one_io(ios, pcol_copy, pcol);
	if (unlikely(ret))
		goto err;

	EXOFS_DBGMSG2("write_exec(0x%lx) offset=0x%llx length=0x%llx\n",
		pcol->inode->i_ino, _LLU(ios->offset), _LLU(ios->length));

	ret = ore_write(ios);
	if (unlikely(ret)) {
		EXOFS_ERR("write_exec: ore_write() Failed\n");
		goto err;
	}

	atomic_inc(&pcol->sbi->s_curr_pending);
	return 0;

err:
	if (!pcol_copy) /* Failed before ownership transfer */
		pcol_copy = pcol;
	_unlock_pcol_pages(pcol_copy, ret, WRITE);
	pcol_free(pcol_copy);

	return ret;
}
/* writepage_strip is called either directly from writepage() or by the VFS
 * from within write_cache_pages(), to add one more page to be written to
 * storage. It will try to collect as many contiguous pages as possible. If a
 * discontinuity is encountered or it runs out of resources it will submit the
 * previous segment and will start a new collection.
 * Eventually the caller must submit the last segment if present.
 */
static int writepage_strip(struct page *page,
			   struct writeback_control *wbc_unused, void *data)
{
	struct page_collect *pcol = data;
	struct inode *inode = pcol->inode;
	struct exofs_i_info *oi = exofs_i(inode);
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	size_t len;
	int ret;

	BUG_ON(!PageLocked(page));

	ret = wait_obj_created(oi);
	if (unlikely(ret))
		goto fail;

	if (page->index < end_index)
		/* in this case, the page is within the limits of the file */
		len = PAGE_CACHE_SIZE;
	else {
		len = i_size & ~PAGE_CACHE_MASK;

		if (page->index > end_index || !len) {
			/* in this case, the page is outside the limits
			 * (truncate in progress)
			 */
			ret = write_exec(pcol);
			if (unlikely(ret))
				goto fail;
			if (PageError(page))
				ClearPageError(page);
			unlock_page(page);
			EXOFS_DBGMSG("writepage_strip(0x%lx, 0x%lx) "
				     "outside the limits\n",
				     inode->i_ino, page->index);
			return 0;
		}
	}

try_again:

	if (unlikely(pcol->pg_first == -1)) {
		pcol->pg_first = page->index;
	} else if (unlikely((pcol->pg_first + pcol->nr_pages) !=
		   page->index)) {
		/* Discontinuity detected, split the request */
		ret = write_exec(pcol);
		if (unlikely(ret))
			goto fail;

		EXOFS_DBGMSG("writepage_strip(0x%lx, 0x%lx) Discontinuity\n",
			     inode->i_ino, page->index);
		goto try_again;
	}

	if (!pcol->pages) {
		ret = pcol_try_alloc(pcol);
		if (unlikely(ret))
			goto fail;
	}

	EXOFS_DBGMSG2("    writepage_strip(0x%lx, 0x%lx) len=0x%zx\n",
		      inode->i_ino, page->index, len);

	ret = pcol_add_page(pcol, page, len);
	if (unlikely(ret)) {
		EXOFS_DBGMSG2("Failed pcol_add_page "
			      "nr_pages=%u total_length=0x%lx\n",
			      pcol->nr_pages, pcol->length);

		/* split the request, next loop will start again */
		ret = write_exec(pcol);
		if (unlikely(ret)) {
			EXOFS_DBGMSG("write_exec failed => %d", ret);
			goto fail;
		}

		goto try_again;
	}

	BUG_ON(PageWriteback(page));
	set_page_writeback(page);

	return 0;

fail:
	EXOFS_DBGMSG("Error: writepage_strip(0x%lx, 0x%lx)=>%d\n",
		     inode->i_ino, page->index, ret);
	set_bit(AS_EIO, &page->mapping->flags);
	unlock_page(page);
	return ret;
}
static int exofs_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct page_collect pcol;
	long start, end, expected_pages;
	int ret;

	start = wbc->range_start >> PAGE_CACHE_SHIFT;
	end = (wbc->range_end == LLONG_MAX) ?
			start + mapping->nrpages :
			wbc->range_end >> PAGE_CACHE_SHIFT;

	if (start || end)
		expected_pages = end - start + 1;
	else
		expected_pages = mapping->nrpages;

	if (expected_pages < 32L)
		expected_pages = 32L;

	EXOFS_DBGMSG2("inode(0x%lx) wbc->start=0x%llx wbc->end=0x%llx "
		     "nrpages=%lu start=0x%lx end=0x%lx expected_pages=%ld\n",
		     mapping->host->i_ino, wbc->range_start, wbc->range_end,
		     mapping->nrpages, start, end, expected_pages);

	_pcol_init(&pcol, expected_pages, mapping->host);

	ret = write_cache_pages(mapping, wbc, writepage_strip, &pcol);
	if (unlikely(ret)) {
		EXOFS_ERR("write_cache_pages => %d\n", ret);
		return ret;
	}

	ret = write_exec(&pcol);
	if (unlikely(ret))
		return ret;

	if (wbc->sync_mode == WB_SYNC_ALL) {
		return write_exec(&pcol); /* pump the last remainder */
	} else if (pcol.nr_pages) {
		/* not SYNC, let the remainder join the next writeout */
		unsigned i;

		for (i = 0; i < pcol.nr_pages; i++) {
			struct page *page = pcol.pages[i];

			end_page_writeback(page);
			set_page_dirty(page);
			unlock_page(page);
		}
	}
	return 0;
}
static int exofs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct page_collect pcol;
	int ret;

	_pcol_init(&pcol, 1, page->mapping->host);

	ret = writepage_strip(page, NULL, &pcol);
	if (ret) {
		EXOFS_ERR("exofs_writepage => %d\n", ret);
		return ret;
	}

	return write_exec(&pcol);
}
/* i_mutex held using inode->i_size directly */
static void _write_failed(struct inode *inode, loff_t to)
{
	if (to > inode->i_size)
		truncate_pagecache(inode, inode->i_size);
}
int exofs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	int ret = 0;
	struct page *page;

	page = *pagep;
	if (page == NULL) {
		ret = simple_write_begin(file, mapping, pos, len, flags, pagep,
					 fsdata);
		if (ret) {
			EXOFS_DBGMSG("simple_write_begin failed\n");
			goto out;
		}
		page = *pagep;
	}

	/* read modify write */
	if (!PageUptodate(page) && (len != PAGE_CACHE_SIZE)) {
		loff_t i_size = i_size_read(mapping->host);
		pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
		size_t rlen;

		if (page->index < end_index)
			rlen = PAGE_CACHE_SIZE;
		else if (page->index == end_index)
			rlen = i_size & ~PAGE_CACHE_MASK;
		else
			rlen = 0;

		if (!rlen) {
			clear_highpage(page);
			SetPageUptodate(page);
			goto out;
		}

		ret = _readpage(page, true);
		if (ret) {
			/*SetPageError was done by _readpage. Is it ok?*/
			unlock_page(page);
			EXOFS_DBGMSG("__readpage failed\n");
		}
	}
out:
	if (unlikely(ret))
		_write_failed(mapping->host, pos + len);

	return ret;
}
static int exofs_write_begin_export(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	*pagep = NULL;

	return exofs_write_begin(file, mapping, pos, len, flags, pagep,
				 fsdata);
}
static int exofs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	/* According to comment in simple_write_end i_mutex is held */
	loff_t i_size = inode->i_size;
	int ret;

	ret = simple_write_end(file, mapping, pos, len, copied, page, fsdata);
	if (unlikely(ret))
		_write_failed(inode, pos + len);

	/* TODO: once simple_write_end marks inode dirty remove */
	if (i_size != inode->i_size)
		mark_inode_dirty(inode);
	return ret;
}
static int exofs_releasepage(struct page *page, gfp_t gfp)
{
	EXOFS_DBGMSG("page 0x%lx\n", page->index);
	WARN_ON(1);
	return 0;
}

static void exofs_invalidatepage(struct page *page, unsigned int offset,
				 unsigned int length)
{
	EXOFS_DBGMSG("page 0x%lx offset 0x%x length 0x%x\n",
		     page->index, offset, length);
	WARN_ON(1);
}
 /* TODO: Should be easy enough to do properly */
static ssize_t exofs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
			       loff_t offset)
{
	return 0;
}
const struct address_space_operations exofs_aops = {
	.readpage	= exofs_readpage,
	.readpages	= exofs_readpages,
	.writepages	= exofs_writepages,
	.write_begin	= exofs_write_begin_export,
	.write_end	= exofs_write_end,
	.releasepage	= exofs_releasepage,
	.set_page_dirty	= __set_page_dirty_nobuffers,
	.invalidatepage = exofs_invalidatepage,

	/* Not implemented Yet */
	.bmap		= NULL, /* TODO: use osd's OSD_ACT_READ_MAP */
	.direct_IO	= exofs_direct_IO,

	/* With these NULL has special meaning or default is not exported */
	.launder_page		= NULL,
	.is_partially_uptodate	= NULL,
	.error_remove_page	= NULL,
};
/******************************************************************************
 * INODE OPERATIONS
 *****************************************************************************/

/*
 * Test whether an inode is a fast symlink.
 */
static inline int exofs_inode_is_fast_symlink(struct inode *inode)
{
	struct exofs_i_info *oi = exofs_i(inode);

	return S_ISLNK(inode->i_mode) && (oi->i_data[0] != 0);
}
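/*
 * Note, inferred from exofs_iget() below (which sets
 * inode->i_link = (char *)oi->i_data for this case): a "fast" symlink keeps
 * its target string inline in the inode's i_data[] rather than in object
 * data, which is why a non-zero first word of i_data identifies it.
 */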
static int _do_truncate(struct inode *inode, loff_t newsize)
{
	struct exofs_i_info *oi = exofs_i(inode);
	struct exofs_sb_info *sbi = inode->i_sb->s_fs_info;
	int ret;

	inode->i_mtime = inode->i_ctime = CURRENT_TIME;

	ret = ore_truncate(&sbi->layout, &oi->oc, (u64)newsize);
	if (likely(!ret))
		truncate_setsize(inode, newsize);

	EXOFS_DBGMSG2("(0x%lx) size=0x%llx ret=>%d\n",
		      inode->i_ino, newsize, ret);
	return ret;
}
/*
 * Set inode attributes - update size attribute on OSD if needed,
 * otherwise just call generic functions.
 */
int exofs_setattr(struct dentry *dentry, struct iattr *iattr)
{
	struct inode *inode = d_inode(dentry);
	int error;

	/* if we are about to modify an object, and it hasn't been
	 * created yet, wait
	 */
	error = wait_obj_created(exofs_i(inode));
	if (unlikely(error))
		return error;

	error = inode_change_ok(inode, iattr);
	if (unlikely(error))
		return error;

	if ((iattr->ia_valid & ATTR_SIZE) &&
	    iattr->ia_size != i_size_read(inode)) {
		error = _do_truncate(inode, iattr->ia_size);
		if (unlikely(error))
			return error;
	}

	setattr_copy(inode, iattr);
	mark_inode_dirty(inode);
	return 0;
}
static const struct osd_attr g_attr_inode_file_layout = ATTR_DEF(
	EXOFS_APAGE_FS_DATA,
	EXOFS_ATTR_INODE_FILE_LAYOUT,
	0);

static const struct osd_attr g_attr_inode_dir_layout = ATTR_DEF(
	EXOFS_APAGE_FS_DATA,
	EXOFS_ATTR_INODE_DIR_LAYOUT,
	0);
/*
 * Read the Linux inode info from the OSD, and return it as is. In exofs the
 * inode info is in an application specific page/attribute of the osd-object.
 */
static int exofs_get_inode(struct super_block *sb, struct exofs_i_info *oi,
			   struct exofs_fcb *inode)
{
	struct exofs_sb_info *sbi = sb->s_fs_info;
	struct osd_attr attrs[] = {
		[0] = g_attr_inode_data,
		[1] = g_attr_inode_file_layout,
		[2] = g_attr_inode_dir_layout,
	};
	struct ore_io_state *ios;
	struct exofs_on_disk_inode_layout *layout;
	int ret;

	ret = ore_get_io_state(&sbi->layout, &oi->oc, &ios);
	if (unlikely(ret)) {
		EXOFS_ERR("%s: ore_get_io_state failed.\n", __func__);
		return ret;
	}

	attrs[1].len = exofs_on_disk_inode_layout_size(sbi->oc.numdevs);
	attrs[2].len = exofs_on_disk_inode_layout_size(sbi->oc.numdevs);

	ios->in_attr = attrs;
	ios->in_attr_len = ARRAY_SIZE(attrs);

	ret = ore_read(ios);
	if (unlikely(ret)) {
		EXOFS_ERR("object(0x%llx) corrupted, return empty file=>%d\n",
			  _LLU(oi->one_comp.obj.id), ret);
		memset(inode, 0, sizeof(*inode));
		inode->i_mode = 0040000 | (0777 & ~022);
		/* If object is lost on target we might as well enable its
		 * delete.
		 */
		ret = 0;
		goto out;
	}

	ret = extract_attr_from_ios(ios, &attrs[0]);
	if (ret) {
		EXOFS_ERR("%s: extract_attr 0 of inode failed\n", __func__);
		goto out;
	}
	WARN_ON(attrs[0].len != EXOFS_INO_ATTR_SIZE);
	memcpy(inode, attrs[0].val_ptr, EXOFS_INO_ATTR_SIZE);

	ret = extract_attr_from_ios(ios, &attrs[1]);
	if (ret) {
		EXOFS_ERR("%s: extract_attr 1 of inode failed\n", __func__);
		goto out;
	}

	layout = attrs[1].val_ptr;
	if (layout->gen_func != cpu_to_le16(LAYOUT_MOVING_WINDOW)) {
		EXOFS_ERR("%s: unsupported files layout %d\n",
			  __func__, layout->gen_func);
		ret = -ENOTSUPP;
		goto out;
	}

	ret = extract_attr_from_ios(ios, &attrs[2]);
	if (ret) {
		EXOFS_ERR("%s: extract_attr 2 of inode failed\n", __func__);
		goto out;
	}

	layout = attrs[2].val_ptr;
	if (layout->gen_func != cpu_to_le16(LAYOUT_MOVING_WINDOW)) {
		EXOFS_ERR("%s: unsupported meta-data layout %d\n",
			  __func__, layout->gen_func);
		ret = -ENOTSUPP;
		goto out;
	}

out:
	ore_put_io_state(ios);
	return ret;
}
static void __oi_init(struct exofs_i_info *oi)
{
	init_waitqueue_head(&oi->i_wq);
}
/*
 * Fill in an inode read from the OSD and set it up for use
 */
struct inode *exofs_iget(struct super_block *sb, unsigned long ino)
{
	struct exofs_i_info *oi;
	struct exofs_fcb fcb;
	struct inode *inode;
	int ret;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;
	oi = exofs_i(inode);
	__oi_init(oi);
	exofs_init_comps(&oi->oc, &oi->one_comp, sb->s_fs_info,
			 exofs_oi_objno(oi));

	/* read the inode from the osd */
	ret = exofs_get_inode(sb, oi, &fcb);
	if (ret)
		goto bad_inode;

	set_obj_created(oi);

	/* copy stuff from on-disk struct to in-memory struct */
	inode->i_mode = le16_to_cpu(fcb.i_mode);
	i_uid_write(inode, le32_to_cpu(fcb.i_uid));
	i_gid_write(inode, le32_to_cpu(fcb.i_gid));
	set_nlink(inode, le16_to_cpu(fcb.i_links_count));
	inode->i_ctime.tv_sec = (signed)le32_to_cpu(fcb.i_ctime);
	inode->i_atime.tv_sec = (signed)le32_to_cpu(fcb.i_atime);
	inode->i_mtime.tv_sec = (signed)le32_to_cpu(fcb.i_mtime);
	inode->i_ctime.tv_nsec =
		inode->i_atime.tv_nsec = inode->i_mtime.tv_nsec = 0;
	oi->i_commit_size = le64_to_cpu(fcb.i_size);
	i_size_write(inode, oi->i_commit_size);
	inode->i_blkbits = EXOFS_BLKSHIFT;
	inode->i_generation = le32_to_cpu(fcb.i_generation);

	oi->i_dir_start_lookup = 0;

	if ((inode->i_nlink == 0) && (inode->i_mode == 0)) {
		ret = -ESTALE;
		goto bad_inode;
	}

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		if (fcb.i_data[0])
			inode->i_rdev =
				old_decode_dev(le32_to_cpu(fcb.i_data[0]));
		else
			inode->i_rdev =
				new_decode_dev(le32_to_cpu(fcb.i_data[1]));
	} else {
		memcpy(oi->i_data, fcb.i_data, sizeof(fcb.i_data));
	}

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &exofs_file_inode_operations;
		inode->i_fop = &exofs_file_operations;
		inode->i_mapping->a_ops = &exofs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &exofs_dir_inode_operations;
		inode->i_fop = &exofs_dir_operations;
		inode->i_mapping->a_ops = &exofs_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		if (exofs_inode_is_fast_symlink(inode)) {
			inode->i_op = &simple_symlink_inode_operations;
			inode->i_link = (char *)oi->i_data;
		} else {
			inode->i_op = &page_symlink_inode_operations;
			inode->i_mapping->a_ops = &exofs_aops;
		}
	} else {
		inode->i_op = &exofs_special_inode_operations;
		if (fcb.i_data[0])
			init_special_inode(inode, inode->i_mode,
			   old_decode_dev(le32_to_cpu(fcb.i_data[0])));
		else
			init_special_inode(inode, inode->i_mode,
			   new_decode_dev(le32_to_cpu(fcb.i_data[1])));
	}

	unlock_new_inode(inode);
	return inode;

bad_inode:
	iget_failed(inode);
	return ERR_PTR(ret);
}
int __exofs_wait_obj_created(struct exofs_i_info *oi)
{
	if (!obj_created(oi)) {
		EXOFS_DBGMSG("!obj_created\n");
		BUG_ON(!obj_2bcreated(oi));
		wait_event(oi->i_wq, obj_created(oi));
		EXOFS_DBGMSG("wait_event done\n");
	}
	return unlikely(is_bad_inode(&oi->vfs_inode)) ? -EIO : 0;
}
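/*
 * How the create/wait handshake fits together (as far as one can tell from
 * this file): exofs_new_inode() kicks off an asynchronous ore_create() and
 * returns immediately, so any path that must touch the object first goes
 * through wait_obj_created(), which sleeps on oi->i_wq until create_done()
 * below marks the object as created.
 */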
/*
 * Callback function from exofs_new_inode(). The important thing is that we
 * set the obj_created flag so that other methods know that the object exists
 * on the OSD.
 */
static void create_done(struct ore_io_state *ios, void *p)
{
	struct inode *inode = p;
	struct exofs_i_info *oi = exofs_i(inode);
	struct exofs_sb_info *sbi = inode->i_sb->s_fs_info;
	int ret;

	ret = ore_check_io(ios, NULL);
	ore_put_io_state(ios);

	atomic_dec(&sbi->s_curr_pending);

	if (unlikely(ret)) {
		EXOFS_ERR("object=0x%llx creation failed in pid=0x%llx",
			  _LLU(exofs_oi_objno(oi)),
			  _LLU(oi->one_comp.obj.partition));
		/*TODO: When FS is corrupted creation can fail, object already
		 * exist. Get rid of this asynchronous creation, if exist
		 * increment the obj counter and try the next object. Until we
		 * succeed. All these dangling objects will be made into lost
		 * files by chkfs.exofs
		 */
	}

	set_obj_created(oi);

	wake_up(&oi->i_wq);
}
/*
 * Set up a new inode and create an object for it on the OSD
 */
struct inode *exofs_new_inode(struct inode *dir, umode_t mode)
{
	struct super_block *sb = dir->i_sb;
	struct exofs_sb_info *sbi = sb->s_fs_info;
	struct inode *inode;
	struct exofs_i_info *oi;
	struct ore_io_state *ios;
	int ret;

	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	oi = exofs_i(inode);
	__oi_init(oi);

	set_obj_2bcreated(oi);

	inode_init_owner(inode, dir, mode);
	inode->i_ino = sbi->s_nextid++;
	inode->i_blkbits = EXOFS_BLKSHIFT;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
	oi->i_commit_size = inode->i_size = 0;
	spin_lock(&sbi->s_next_gen_lock);
	inode->i_generation = sbi->s_next_generation++;
	spin_unlock(&sbi->s_next_gen_lock);
	insert_inode_hash(inode);

	exofs_init_comps(&oi->oc, &oi->one_comp, sb->s_fs_info,
			 exofs_oi_objno(oi));
	exofs_sbi_write_stats(sbi); /* Make sure new sbi->s_nextid is on disk */

	mark_inode_dirty(inode);

	ret = ore_get_io_state(&sbi->layout, &oi->oc, &ios);
	if (unlikely(ret)) {
		EXOFS_ERR("exofs_new_inode: ore_get_io_state failed\n");
		return ERR_PTR(ret);
	}

	ios->done = create_done;
	ios->private = inode;

	ret = ore_create(ios);
	if (ret) {
		ore_put_io_state(ios);
		return ERR_PTR(ret);
	}
	atomic_inc(&sbi->s_curr_pending);

	return inode;
}
/*
 * struct to pass two arguments to update_inode's callback
 */
struct updatei_args {
	struct exofs_sb_info	*sbi;
	struct exofs_fcb	fcb;
};

/*
 * Callback function from exofs_update_inode().
 */
static void updatei_done(struct ore_io_state *ios, void *p)
{
	struct updatei_args *args = p;

	ore_put_io_state(ios);

	atomic_dec(&args->sbi->s_curr_pending);

	kfree(args);
}
/*
 * Write the inode to the OSD. Just fill up the struct, and set the attribute
 * synchronously or asynchronously depending on the do_sync flag.
 */
static int exofs_update_inode(struct inode *inode, int do_sync)
{
	struct exofs_i_info *oi = exofs_i(inode);
	struct super_block *sb = inode->i_sb;
	struct exofs_sb_info *sbi = sb->s_fs_info;
	struct ore_io_state *ios;
	struct osd_attr attr;
	struct exofs_fcb *fcb;
	struct updatei_args *args;
	int ret;

	args = kzalloc(sizeof(*args), GFP_KERNEL);
	if (!args) {
		EXOFS_DBGMSG("Failed kzalloc of args\n");
		return -ENOMEM;
	}

	fcb = &args->fcb;

	fcb->i_mode = cpu_to_le16(inode->i_mode);
	fcb->i_uid = cpu_to_le32(i_uid_read(inode));
	fcb->i_gid = cpu_to_le32(i_gid_read(inode));
	fcb->i_links_count = cpu_to_le16(inode->i_nlink);
	fcb->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
	fcb->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
	fcb->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
	oi->i_commit_size = i_size_read(inode);
	fcb->i_size = cpu_to_le64(oi->i_commit_size);
	fcb->i_generation = cpu_to_le32(inode->i_generation);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		if (old_valid_dev(inode->i_rdev))
			fcb->i_data[0] =
				cpu_to_le32(old_encode_dev(inode->i_rdev));
		else
			fcb->i_data[1] =
				cpu_to_le32(new_encode_dev(inode->i_rdev));
	} else
		memcpy(fcb->i_data, oi->i_data, sizeof(fcb->i_data));

	ret = ore_get_io_state(&sbi->layout, &oi->oc, &ios);
	if (unlikely(ret)) {
		EXOFS_ERR("%s: ore_get_io_state failed.\n", __func__);
		goto free_args;
	}

	attr = g_attr_inode_data;
	attr.val_ptr = fcb;
	ios->out_attr_len = 1;
	ios->out_attr = &attr;

	wait_obj_created(oi);

	if (!do_sync) {
		args->sbi = sbi;
		ios->done = updatei_done;
		ios->private = args;
	}

	ret = ore_write(ios);
	if (!do_sync && !ret) {
		atomic_inc(&sbi->s_curr_pending);
		goto out; /* deallocation in updatei_done */
	}

	ore_put_io_state(ios);

free_args:
	kfree(args);
out:
	EXOFS_DBGMSG("(0x%lx) do_sync=%d ret=>%d\n",
		     inode->i_ino, do_sync, ret);
	return ret;
}
int exofs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	/* FIXME: fix fsync and use wbc->sync_mode == WB_SYNC_ALL */
	return exofs_update_inode(inode, 1);
}
/*
 * Callback function from exofs_delete_inode() - don't have much cleaning up
 * to do.
 */
static void delete_done(struct ore_io_state *ios, void *p)
{
	struct exofs_sb_info *sbi = p;

	ore_put_io_state(ios);

	atomic_dec(&sbi->s_curr_pending);
}
/*
 * Called when the refcount of an inode reaches zero. We remove the object
 * from the OSD here. We make sure the object was created before we try and
 * delete it.
 */
void exofs_evict_inode(struct inode *inode)
{
	struct exofs_i_info *oi = exofs_i(inode);
	struct super_block *sb = inode->i_sb;
	struct exofs_sb_info *sbi = sb->s_fs_info;
	struct ore_io_state *ios;
	int ret;

	truncate_inode_pages_final(&inode->i_data);

	/* TODO: should do better here */
	if (inode->i_nlink || is_bad_inode(inode))
		goto no_delete;

	inode->i_size = 0;
	clear_inode(inode);

	/* if we are deleting an obj that hasn't been created yet, wait.
	 * This also makes sure that create_done cannot be called with an
	 * already evicted inode.
	 */
	wait_obj_created(oi);
	/* ignore the error, attempt a remove anyway */

	/* Now Remove the OSD objects */
	ret = ore_get_io_state(&sbi->layout, &oi->oc, &ios);
	if (unlikely(ret)) {
		EXOFS_ERR("%s: ore_get_io_state failed\n", __func__);
		return;
	}

	ios->done = delete_done;
	ios->private = sbi;

	ret = ore_remove(ios);
	if (ret) {
		EXOFS_ERR("%s: ore_remove failed\n", __func__);
		ore_put_io_state(ios);
		return;
	}
	atomic_inc(&sbi->s_curr_pending);

	return;

no_delete:
	clear_inode(inode);
}