/*
 * Copyright (c) 2013, Intel Corporation
 * Authors: Huajun Li <huajun.li@intel.com>
 *          Haicheng Li <haicheng.li@intel.com>
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>

#include "f2fs.h"
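/*
 * f2fs_may_inline - check whether @inode's data can be kept inline:
 * the inline_data mount option must be set, the inode must own no data
 * blocks beyond its node (and xattr) blocks, and i_size must fit within
 * MAX_INLINE_DATA.
 */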
bool f2fs_may_inline(struct inode *inode)
{
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
        block_t nr_blocks;
        loff_t i_size;

        if (!test_opt(sbi, INLINE_DATA))
                return false;

        nr_blocks = F2FS_I(inode)->i_xattr_nid ? 3 : 2;
        if (inode->i_blocks > nr_blocks)
                return false;

        i_size = i_size_read(inode);
        if (i_size > MAX_INLINE_DATA)
                return false;

        return true;
}
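/*
 * f2fs_read_inline_data - fill the locked @page from the inline area of
 * the inode block, zeroing the part of the page past MAX_INLINE_DATA,
 * and mark the page up-to-date.
 */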
int f2fs_read_inline_data(struct inode *inode, struct page *page)
{
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
        struct page *ipage;
        void *src_addr, *dst_addr;

        if (page->index) {
                zero_user_segment(page, 0, PAGE_CACHE_SIZE);
                goto out;
        }

        ipage = get_node_page(sbi, inode->i_ino);
        if (IS_ERR(ipage)) {
                unlock_page(page);
                return PTR_ERR(ipage);
        }

        zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);

        /* Copy the whole inline data block */
        src_addr = inline_data_addr(ipage);
        dst_addr = kmap(page);
        memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
        kunmap(page);
        f2fs_put_page(ipage, 1);

out:
        SetPageUptodate(page);
        unlock_page(page);

        return 0;
}
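/*
 * __f2fs_convert_inline_data - move the inline payload of @inode into a
 * newly reserved data block via @page, then clear the inline area and
 * the FI_INLINE_DATA flag once the data page writeback completes.
 */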
static int __f2fs_convert_inline_data(struct inode *inode, struct page *page)
{
        int err = 0;
        struct page *ipage;
        struct dnode_of_data dn;
        void *src_addr, *dst_addr;
        block_t new_blk_addr;
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
        struct f2fs_io_info fio = {
                .type = DATA,
                .rw = WRITE_SYNC | REQ_PRIO,
        };

        f2fs_lock_op(sbi);
        ipage = get_node_page(sbi, inode->i_ino);
        if (IS_ERR(ipage)) {
                err = PTR_ERR(ipage);
                goto out;
        }

        /*
         * i_addr[0] is not used for inline data,
         * so reserving new block will not destroy inline data
         */
        set_new_dnode(&dn, inode, ipage, NULL, 0);
        err = f2fs_reserve_block(&dn, 0);
        if (err)
                goto out;

        f2fs_wait_on_page_writeback(page, DATA);
        zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);

        /* Copy the whole inline data block */
        src_addr = inline_data_addr(ipage);
        dst_addr = kmap(page);
        memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
        kunmap(page);
        SetPageUptodate(page);

        /* write data page to try to make data consistent */
        set_page_writeback(page);
        write_data_page(page, &dn, &new_blk_addr, &fio);
        update_extent_cache(new_blk_addr, &dn);
        f2fs_wait_on_page_writeback(page, DATA);

        /* clear inline data and flag after data writeback */
        zero_user_segment(ipage, INLINE_DATA_OFFSET,
                                INLINE_DATA_OFFSET + MAX_INLINE_DATA);
        clear_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
        stat_dec_inline_inode(inode);

        sync_inode_page(&dn);
        f2fs_put_dnode(&dn);
out:
        f2fs_unlock_op(sbi);
        return err;
}
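/*
 * f2fs_convert_inline_data - convert @inode's inline data into a regular
 * data block when the file is about to grow to @to_size bytes; a no-op
 * if the inode is not inline or @to_size still fits in the inline area.
 */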
int f2fs_convert_inline_data(struct inode *inode, pgoff_t to_size)
{
        struct page *page;
        int err;

        if (!f2fs_has_inline_data(inode))
                return 0;
        else if (to_size <= MAX_INLINE_DATA)
                return 0;

        page = grab_cache_page(inode->i_mapping, 0);
        if (!page)
                return -ENOMEM;

        err = __f2fs_convert_inline_data(inode, page);
        f2fs_put_page(page, 1);

        return err;
}
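/*
 * f2fs_write_inline_data - copy @size bytes from @page into the inline
 * area of the inode block; if the inode was not inline before, release
 * its first data block and set the FI_INLINE_DATA flag.
 */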
int f2fs_write_inline_data(struct inode *inode,
                           struct page *page, unsigned size)
{
        void *src_addr, *dst_addr;
        struct page *ipage;
        struct dnode_of_data dn;
        int err;

        set_new_dnode(&dn, inode, NULL, NULL, 0);
        err = get_dnode_of_data(&dn, 0, LOOKUP_NODE);
        if (err)
                return err;
        ipage = dn.inode_page;

        f2fs_wait_on_page_writeback(ipage, NODE);
        zero_user_segment(ipage, INLINE_DATA_OFFSET,
                                INLINE_DATA_OFFSET + MAX_INLINE_DATA);
        src_addr = kmap(page);
        dst_addr = inline_data_addr(ipage);
        memcpy(dst_addr, src_addr, size);
        kunmap(page);

        /* Release the first data block if it is allocated */
        if (!f2fs_has_inline_data(inode)) {
                truncate_data_blocks_range(&dn, 1);
                set_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
                stat_inc_inline_inode(inode);
        }

        sync_inode_page(&dn);
        f2fs_put_dnode(&dn);

        return 0;
}
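/*
 * truncate_inline_data - zero the inline area of the inode block from
 * byte offset @from to the end of the inline area.
 */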
void truncate_inline_data(struct inode *inode, u64 from)
{
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
        struct page *ipage;

        if (from >= MAX_INLINE_DATA)
                return;

        ipage = get_node_page(sbi, inode->i_ino);
        if (IS_ERR(ipage))
                return;

        f2fs_wait_on_page_writeback(ipage, NODE);

        zero_user_segment(ipage, INLINE_DATA_OFFSET + from,
                                INLINE_DATA_OFFSET + MAX_INLINE_DATA);
        set_page_dirty(ipage);
        f2fs_put_page(ipage, 1);
}
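/*
 * recover_inline_data - during recovery, reconcile the inline state of
 * @inode with the state recorded in the recovered node page @npage,
 * following the policy table below.
 */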
int recover_inline_data(struct inode *inode, struct page *npage)
{
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
        struct f2fs_inode *ri = NULL;
        void *src_addr, *dst_addr;
        struct page *ipage;

        /*
         * The inline_data recovery policy is as follows.
         * [prev.] [next] of inline_data flag
         *    o       o  -> recover inline_data
         *    o       x  -> remove inline_data, and then recover data blocks
         *    x       o  -> remove inline_data, and then recover inline_data
         *    x       x  -> recover data blocks
         */
        if (IS_INODE(npage))
                ri = F2FS_INODE(npage);

        if (f2fs_has_inline_data(inode) &&
                        ri && ri->i_inline & F2FS_INLINE_DATA) {
process_inline:
                ipage = get_node_page(sbi, inode->i_ino);
                f2fs_bug_on(IS_ERR(ipage));

                f2fs_wait_on_page_writeback(ipage, NODE);

                src_addr = inline_data_addr(npage);
                dst_addr = inline_data_addr(ipage);
                memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
                update_inode(inode, ipage);
                f2fs_put_page(ipage, 1);
                return -1;
        }

        if (f2fs_has_inline_data(inode)) {
                ipage = get_node_page(sbi, inode->i_ino);
                f2fs_bug_on(IS_ERR(ipage));
                f2fs_wait_on_page_writeback(ipage, NODE);
                zero_user_segment(ipage, INLINE_DATA_OFFSET,
                                INLINE_DATA_OFFSET + MAX_INLINE_DATA);
                clear_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
                update_inode(inode, ipage);
                f2fs_put_page(ipage, 1);
        } else if (ri && ri->i_inline & F2FS_INLINE_DATA) {
                truncate_blocks(inode, 0);
                set_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
                goto process_inline;
        }
        return 0;
}