/*
 * fs/f2fs/inline.c
 * Copyright (c) 2013, Intel Corporation
 * Authors: Huajun Li <huajun.li@intel.com>
 *          Haicheng Li <haicheng.li@intel.com>
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/fs.h>
#include <linux/f2fs_fs.h>

#include "f2fs.h"

bool f2fs_may_inline(struct inode *inode)
{
	if (!test_opt(F2FS_I_SB(inode), INLINE_DATA))
		return false;

	if (f2fs_is_atomic_file(inode))
		return false;

	if (!S_ISREG(inode->i_mode))
		return false;

	if (i_size_read(inode) > MAX_INLINE_DATA)
		return false;

	return true;
}
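
/*
 * Copy the inline data stored in the inode page into a regular data page
 * and zero the remainder of that page beyond MAX_INLINE_DATA.
 */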
void read_inline_data(struct page *page, struct page *ipage)
{
	void *src_addr, *dst_addr;

	if (PageUptodate(page))
		return;

	f2fs_bug_on(F2FS_P_SB(page), page->index);

	zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);

	/* Copy the whole inline data block */
	src_addr = inline_data_addr(ipage);
	dst_addr = kmap_atomic(page);
	memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
	flush_dcache_page(page);
	kunmap_atomic(dst_addr);
	SetPageUptodate(page);
}
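
/* Wipe the inline data area of @ipage after its node writeback has finished. */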
static void truncate_inline_data(struct page *ipage)
{
	f2fs_wait_on_page_writeback(ipage, NODE);
	memset(inline_data_addr(ipage), 0, MAX_INLINE_DATA);
}
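
/*
 * Fill @page for a read against an inline inode: index 0 gets the inline
 * data, any other index is simply zero-filled.
 */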
int f2fs_read_inline_data(struct inode *inode, struct page *page)
{
	struct page *ipage;

	ipage = get_node_page(F2FS_I_SB(inode), inode->i_ino);
	if (IS_ERR(ipage)) {
		unlock_page(page);
		return PTR_ERR(ipage);
	}

	if (!f2fs_has_inline_data(inode)) {
		f2fs_put_page(ipage, 1);
		return -EAGAIN;
	}

	if (page->index)
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	else
		read_inline_data(page, ipage);

	SetPageUptodate(page);
	f2fs_put_page(ipage, 1);
	unlock_page(page);
	return 0;
}
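
/*
 * Move the inline data into the data block reserved at @dn, write that block
 * out synchronously, and then clear the inline data and the inline flag.
 */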
int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
{
	void *src_addr, *dst_addr;
	struct f2fs_io_info fio = {
		.type = DATA,
		.rw = WRITE_SYNC | REQ_PRIO,
	};
	int dirty, err;

	f2fs_bug_on(F2FS_I_SB(dn->inode), page->index);

	if (!f2fs_exist_data(dn->inode))
		goto clear_out;

	err = f2fs_reserve_block(dn, 0);
	if (err)
		return err;

	f2fs_wait_on_page_writeback(page, DATA);

	if (PageUptodate(page))
		goto no_update;

	zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);

	/* Copy the whole inline data block */
	src_addr = inline_data_addr(dn->inode_page);
	dst_addr = kmap_atomic(page);
	memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
	flush_dcache_page(page);
	kunmap_atomic(dst_addr);
	SetPageUptodate(page);
no_update:
	/* clear dirty state */
	dirty = clear_page_dirty_for_io(page);

	/* write data page to try to make data consistent */
	set_page_writeback(page);
	fio.blk_addr = dn->data_blkaddr;
	write_data_page(page, dn, &fio);
	update_extent_cache(dn);
	f2fs_wait_on_page_writeback(page, DATA);
	if (dirty)
		inode_dec_dirty_pages(dn->inode);

	/* this converted inline_data should be recovered. */
	set_inode_flag(F2FS_I(dn->inode), FI_APPEND_WRITE);

	/* clear inline data and flag after data writeback */
	truncate_inline_data(dn->inode_page);
clear_out:
	stat_dec_inline_inode(dn->inode);
	f2fs_clear_inline_inode(dn->inode);
	sync_inode_page(dn);
	f2fs_put_dnode(dn);
	return 0;
}
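
/*
 * Grab the inode's 0'th data page and its node page, then convert the
 * inline data if the inode still carries any.
 */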
int f2fs_convert_inline_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	struct page *ipage, *page;
	int err = 0;

	page = grab_cache_page(inode->i_mapping, 0);
	if (!page)
		return -ENOMEM;

	f2fs_lock_op(sbi);

	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode))
		err = f2fs_convert_inline_page(&dn, page);

	f2fs_put_dnode(&dn);
out:
	f2fs_unlock_op(sbi);

	f2fs_put_page(page, 1);
	return err;
}
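
/*
 * Copy a dirty data page (at most MAX_INLINE_DATA bytes) back into the
 * inline area of the inode page.
 */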
int f2fs_write_inline_data(struct inode *inode, struct page *page)
{
	void *src_addr, *dst_addr;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, 0, LOOKUP_NODE);
	if (err)
		return err;

	if (!f2fs_has_inline_data(inode)) {
		f2fs_put_dnode(&dn);
		return -EAGAIN;
	}

	f2fs_bug_on(F2FS_I_SB(inode), page->index);

	f2fs_wait_on_page_writeback(dn.inode_page, NODE);
	src_addr = kmap_atomic(page);
	dst_addr = inline_data_addr(dn.inode_page);
	memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
	kunmap_atomic(src_addr);

	set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
	set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);

	sync_inode_page(&dn);
	f2fs_put_dnode(&dn);
	return 0;
}
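
/*
 * Called during roll-forward recovery: returns true when inline data was
 * recovered from @npage, false when data blocks still need to be recovered.
 */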
bool recover_inline_data(struct inode *inode, struct page *npage)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode *ri = NULL;
	void *src_addr, *dst_addr;
	struct page *ipage;

	/*
	 * The inline_data recovery policy is as follows.
	 * [prev.] [next] of inline_data flag
	 *    o       o  -> recover inline_data
	 *    o       x  -> remove inline_data, and then recover data blocks
	 *    x       o  -> remove inline_data, and then recover inline_data
	 *    x       x  -> recover data blocks
	 */
	if (IS_INODE(npage))
		ri = F2FS_INODE(npage);

	if (f2fs_has_inline_data(inode) &&
			ri && (ri->i_inline & F2FS_INLINE_DATA)) {
process_inline:
		ipage = get_node_page(sbi, inode->i_ino);
		f2fs_bug_on(sbi, IS_ERR(ipage));

		f2fs_wait_on_page_writeback(ipage, NODE);

		src_addr = inline_data_addr(npage);
		dst_addr = inline_data_addr(ipage);
		memcpy(dst_addr, src_addr, MAX_INLINE_DATA);

		set_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
		set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);

		update_inode(inode, ipage);
		f2fs_put_page(ipage, 1);
		return true;
	}

	if (f2fs_has_inline_data(inode)) {
		ipage = get_node_page(sbi, inode->i_ino);
		f2fs_bug_on(sbi, IS_ERR(ipage));
		truncate_inline_data(ipage);
		f2fs_clear_inline_inode(inode);
		update_inode(inode, ipage);
		f2fs_put_page(ipage, 1);
	} else if (ri && (ri->i_inline & F2FS_INLINE_DATA)) {
		truncate_blocks(inode, 0, false);
		goto process_inline;
	}
	return false;
}
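
/*
 * Look up @name in an inline directory. On a hit, the dentry is returned
 * and *res_page points to the referenced (but unlocked) node page.
 */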
struct f2fs_dir_entry *find_in_inline_dir(struct inode *dir,
				struct qstr *name, struct page **res_page)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
	struct f2fs_inline_dentry *inline_dentry;
	struct f2fs_dir_entry *de;
	struct f2fs_dentry_ptr d;
	struct page *ipage;

	ipage = get_node_page(sbi, dir->i_ino);
	if (IS_ERR(ipage))
		return NULL;

	inline_dentry = inline_data_addr(ipage);

	make_dentry_ptr(&d, (void *)inline_dentry, 2);
	de = find_target_dentry(name, NULL, &d);

	unlock_page(ipage);
	if (de)
		*res_page = ipage;
	else
		f2fs_put_page(ipage, 0);

	/*
	 * For the most part, it should be a bug when name_len is zero.
	 * We stop here for figuring out where the bug has occurred.
	 */
	f2fs_bug_on(sbi, d.max < 0);
	return de;
}
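
/* Return the ".." entry, i.e. dentry slot 1, of an inline directory. */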
struct f2fs_dir_entry *f2fs_parent_inline_dir(struct inode *dir,
							struct page **p)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct page *ipage;
	struct f2fs_dir_entry *de;
	struct f2fs_inline_dentry *dentry_blk;

	ipage = get_node_page(sbi, dir->i_ino);
	if (IS_ERR(ipage))
		return NULL;

	dentry_blk = inline_data_addr(ipage);
	de = &dentry_blk->dentry[1];
	*p = ipage;
	unlock_page(ipage);
	return de;
}
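
/*
 * Create the "." and ".." entries for a fresh inline directory and raise
 * i_size to MAX_INLINE_DATA.
 */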
int make_empty_inline_dir(struct inode *inode, struct inode *parent,
							struct page *ipage)
{
	struct f2fs_inline_dentry *dentry_blk;
	struct f2fs_dentry_ptr d;

	dentry_blk = inline_data_addr(ipage);

	make_dentry_ptr(&d, (void *)dentry_blk, 2);
	do_make_empty_dir(inode, parent, &d);

	set_page_dirty(ipage);

	/* update i_size to MAX_INLINE_DATA */
	if (i_size_read(inode) < MAX_INLINE_DATA) {
		i_size_write(inode, MAX_INLINE_DATA);
		set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
	}
	return 0;
}
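
/*
 * Copy the inline dentries into a newly reserved dentry block, then clear
 * the inline dentry area and the FI_INLINE_DENTRY flag.
 */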
static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage,
				struct f2fs_inline_dentry *inline_dentry)
{
	struct page *page;
	struct dnode_of_data dn;
	struct f2fs_dentry_block *dentry_blk;
	int err;

	page = grab_cache_page(dir->i_mapping, 0);
	if (!page)
		return -ENOMEM;

	set_new_dnode(&dn, dir, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, 0);
	if (err)
		goto out;

	f2fs_wait_on_page_writeback(page, DATA);
	zero_user_segment(page, 0, PAGE_CACHE_SIZE);

	dentry_blk = kmap_atomic(page);

	/* copy data from inline dentry block to new dentry block */
	memcpy(dentry_blk->dentry_bitmap, inline_dentry->dentry_bitmap,
					INLINE_DENTRY_BITMAP_SIZE);
	memcpy(dentry_blk->dentry, inline_dentry->dentry,
			sizeof(struct f2fs_dir_entry) * NR_INLINE_DENTRY);
	memcpy(dentry_blk->filename, inline_dentry->filename,
					NR_INLINE_DENTRY * F2FS_SLOT_LEN);

	kunmap_atomic(dentry_blk);
	SetPageUptodate(page);
	set_page_dirty(page);

	/* clear inline dir and flag after data writeback */
	truncate_inline_data(ipage);

	stat_dec_inline_dir(dir);
	clear_inode_flag(F2FS_I(dir), FI_INLINE_DENTRY);

	if (i_size_read(dir) < PAGE_CACHE_SIZE) {
		i_size_write(dir, PAGE_CACHE_SIZE);
		set_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
	}

	sync_inode_page(&dn);
out:
	f2fs_put_page(page, 1);
	return err;
}
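
/*
 * Add an entry for @name/@inode into an inline directory. When no free
 * slots remain, the directory is converted to a regular dentry block and
 * -EAGAIN is returned so the caller can retry against that block.
 */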
int f2fs_add_inline_entry(struct inode *dir, const struct qstr *name,
						struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct page *ipage;
	unsigned int bit_pos;
	f2fs_hash_t name_hash;
	struct f2fs_dir_entry *de;
	size_t namelen = name->len;
	struct f2fs_inline_dentry *dentry_blk = NULL;
	int slots = GET_DENTRY_SLOTS(namelen);
	struct page *page;
	int err = 0;
	int i;

	name_hash = f2fs_dentry_hash(name);

	ipage = get_node_page(sbi, dir->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	dentry_blk = inline_data_addr(ipage);
	bit_pos = room_for_filename(&dentry_blk->dentry_bitmap,
						slots, NR_INLINE_DENTRY);
	if (bit_pos >= NR_INLINE_DENTRY) {
		err = f2fs_convert_inline_dir(dir, ipage, dentry_blk);
		if (!err)
			err = -EAGAIN;
		goto out;
	}

	down_write(&F2FS_I(inode)->i_sem);
	page = init_inode_metadata(inode, dir, name, ipage);
	if (IS_ERR(page)) {
		err = PTR_ERR(page);
		goto fail;
	}

	f2fs_wait_on_page_writeback(ipage, NODE);
	de = &dentry_blk->dentry[bit_pos];
	de->hash_code = name_hash;
	de->name_len = cpu_to_le16(namelen);
	memcpy(dentry_blk->filename[bit_pos], name->name, name->len);
	de->ino = cpu_to_le32(inode->i_ino);
	set_de_type(de, inode);
	for (i = 0; i < slots; i++)
		test_and_set_bit_le(bit_pos + i, &dentry_blk->dentry_bitmap);
	set_page_dirty(ipage);

	/* we don't need to mark_inode_dirty now */
	F2FS_I(inode)->i_pino = dir->i_ino;
	update_inode(inode, page);
	f2fs_put_page(page, 1);

	update_parent_metadata(dir, inode, 0);
fail:
	up_write(&F2FS_I(inode)->i_sem);

	if (is_inode_flag_set(F2FS_I(dir), FI_UPDATE_DIR)) {
		update_inode(dir, ipage);
		clear_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
	}
out:
	f2fs_put_page(ipage, 1);
	return err;
}
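
/* Clear the bitmap bits covering @dentry's slots in the inline dentry block. */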
void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry, struct page *page,
					struct inode *dir, struct inode *inode)
{
	struct f2fs_inline_dentry *inline_dentry;
	int slots = GET_DENTRY_SLOTS(le16_to_cpu(dentry->name_len));
	unsigned int bit_pos;
	int i;

	lock_page(page);
	f2fs_wait_on_page_writeback(page, NODE);

	inline_dentry = inline_data_addr(page);
	bit_pos = dentry - inline_dentry->dentry;
	for (i = 0; i < slots; i++)
		test_and_clear_bit_le(bit_pos + i,
				&inline_dentry->dentry_bitmap);

	set_page_dirty(page);

	dir->i_ctime = dir->i_mtime = CURRENT_TIME;

	if (inode)
		f2fs_drop_nlink(dir, inode, page);
	else
		f2fs_put_page(page, 1);
}
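
/*
 * An inline directory is empty when no slot beyond "." and ".." (bit
 * positions 0 and 1) is in use.
 */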
bool f2fs_empty_inline_dir(struct inode *dir)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct page *ipage;
	unsigned int bit_pos = 2;
	struct f2fs_inline_dentry *dentry_blk;

	ipage = get_node_page(sbi, dir->i_ino);
	if (IS_ERR(ipage))
		return false;

	dentry_blk = inline_data_addr(ipage);
	bit_pos = find_next_bit_le(&dentry_blk->dentry_bitmap,
					NR_INLINE_DENTRY,
					bit_pos);

	f2fs_put_page(ipage, 1);

	if (bit_pos < NR_INLINE_DENTRY)
		return false;

	return true;
}
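
/* readdir() path for inline directories: fill @ctx straight from the inline dentry block. */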
int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx)
{
	struct inode *inode = file_inode(file);
	struct f2fs_inline_dentry *inline_dentry = NULL;
	struct page *ipage = NULL;
	struct f2fs_dentry_ptr d;

	if (ctx->pos == NR_INLINE_DENTRY)
		return 0;

	ipage = get_node_page(F2FS_I_SB(inode), inode->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	inline_dentry = inline_data_addr(ipage);

	make_dentry_ptr(&d, (void *)inline_dentry, 2);

	if (!f2fs_fill_dentries(ctx, &d, 0))
		ctx->pos = NR_INLINE_DENTRY;

	f2fs_put_page(ipage, 1);
	return 0;
}