fs/btrfs/reflink.c
// SPDX-License-Identifier: GPL-2.0

#include <linux/blkdev.h>
#include <linux/iversion.h>
#include "compression.h"
#include "ctree.h"
#include "delalloc-space.h"
#include "reflink.h"
#include "transaction.h"

#define BTRFS_MAX_DEDUPE_LEN	SZ_16M

static int clone_finish_inode_update(struct btrfs_trans_handle *trans,
				     struct inode *inode,
				     u64 endoff,
				     const u64 destoff,
				     const u64 olen,
				     int no_time_update)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret;

	inode_inc_iversion(inode);
	if (!no_time_update)
		inode->i_mtime = inode->i_ctime = current_time(inode);
	/*
	 * We round up to the block size at eof when determining which
	 * extents to clone above, but shouldn't round up the file size.
	 */
	if (endoff > destoff + olen)
		endoff = destoff + olen;
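	/*
	 * Editorial example (not in the original source): with destoff == 0,
	 * olen == 5000 and a 4K block size, the caller may pass an endoff of
	 * 8192 (the block-aligned end of the last cloned extent), but the
	 * clamp above keeps endoff at 5000 so the destination file size is
	 * not inflated past the cloned range.
	 */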
	if (endoff > inode->i_size) {
		i_size_write(inode, endoff);
		btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
	}

	ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
		goto out;
	}
	ret = btrfs_end_transaction(trans);
out:
	return ret;
}

static int copy_inline_to_page(struct btrfs_inode *inode,
			       const u64 file_offset,
			       char *inline_data,
			       const u64 size,
			       const u64 datal,
			       const u8 comp_type)
{
	const u64 block_size = btrfs_inode_sectorsize(inode);
	const u64 range_end = file_offset + block_size - 1;
	const size_t inline_size = size - btrfs_file_extent_calc_inline_size(0);
	char *data_start = inline_data + btrfs_file_extent_calc_inline_size(0);
	struct extent_changeset *data_reserved = NULL;
	struct page *page = NULL;
	struct address_space *mapping = inode->vfs_inode.i_mapping;
	int ret;

	ASSERT(IS_ALIGNED(file_offset, block_size));

	/*
	 * We have flushed and locked the ranges of the source and destination
	 * inodes, we also have locked the inodes, so we are safe to do a
	 * reservation here. Also we must not do the reservation while holding
	 * a transaction open, otherwise we would deadlock.
	 */
	ret = btrfs_delalloc_reserve_space(inode, &data_reserved, file_offset,
					   block_size);
	if (ret)
		goto out;

	page = find_or_create_page(mapping, file_offset >> PAGE_SHIFT,
				   btrfs_alloc_write_mask(mapping));
	if (!page) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	set_page_extent_mapped(page);
	clear_extent_bit(&inode->io_tree, file_offset, range_end,
			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
			 0, 0, NULL);
	ret = btrfs_set_extent_delalloc(inode, file_offset, range_end, 0, NULL);
	if (ret)
		goto out_unlock;

	/*
	 * After dirtying the page our caller will need to start a transaction,
	 * and if we are low on metadata free space, that can cause flushing of
	 * delalloc for all inodes in order to get metadata space released.
	 * However we are holding the range locked for the whole duration of
	 * the clone/dedupe operation, so we may deadlock if that happens and no
	 * other task releases enough space. So mark this inode as not being
	 * possible to flush to avoid such deadlock. We will clear that flag
	 * when we finish cloning all extents, since a transaction is started
	 * after finding each extent to clone.
	 */
	set_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &inode->runtime_flags);

	if (comp_type == BTRFS_COMPRESS_NONE) {
		char *map;

		map = kmap(page);
		memcpy(map, data_start, datal);
		flush_dcache_page(page);
		kunmap(page);
	} else {
		ret = btrfs_decompress(comp_type, data_start, page, 0,
				       inline_size, datal);
		if (ret)
			goto out_unlock;
		flush_dcache_page(page);
	}

	/*
	 * If our inline data is smaller than the block/page size, then the
	 * remainder of the block/page is equivalent to zeroes. We had something
	 * like the following done:
	 *
	 * $ xfs_io -f -c "pwrite -S 0xab 0 500" file
	 * $ sync # (or fsync)
	 * $ xfs_io -c "falloc 0 4K" file
	 * $ xfs_io -c "pwrite -S 0xcd 4K 4K"
	 *
	 * So what's in the range [500, 4095] corresponds to zeroes.
	 */
	if (datal < block_size) {
		char *map;

		map = kmap(page);
		memset(map + datal, 0, block_size - datal);
		flush_dcache_page(page);
		kunmap(page);
	}

	SetPageUptodate(page);
	ClearPageChecked(page);
	set_page_dirty(page);
out_unlock:
	if (page) {
		unlock_page(page);
		put_page(page);
	}
	if (ret)
		btrfs_delalloc_release_space(inode, data_reserved, file_offset,
					     block_size, true);
	btrfs_delalloc_release_extents(inode, block_size);
out:
	extent_changeset_free(data_reserved);

	return ret;
}

/*
 * Deal with cloning of inline extents. We try to copy the inline extent from
 * the source inode to the destination inode when possible. When not possible,
 * we copy the inline extent's data into the respective page of the destination
 * inode.
 */
static int clone_copy_inline_extent(struct inode *dst,
				    struct btrfs_path *path,
				    struct btrfs_key *new_key,
				    const u64 drop_start,
				    const u64 datal,
				    const u64 size,
				    const u8 comp_type,
				    char *inline_data,
				    struct btrfs_trans_handle **trans_out)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(dst->i_sb);
	struct btrfs_root *root = BTRFS_I(dst)->root;
	const u64 aligned_end = ALIGN(new_key->offset + datal,
				      fs_info->sectorsize);
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_drop_extents_args drop_args = { 0 };
	int ret;
	struct btrfs_key key;

	if (new_key->offset > 0) {
		ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
					  inline_data, size, datal, comp_type);
		goto out;
	}

	key.objectid = btrfs_ino(BTRFS_I(dst));
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0) {
		return ret;
	} else if (ret > 0) {
		if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				return ret;
			else if (ret > 0)
				goto copy_inline_extent;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid == btrfs_ino(BTRFS_I(dst)) &&
		    key.type == BTRFS_EXTENT_DATA_KEY) {
			/*
			 * There's an implicit hole at file offset 0, copy the
			 * inline extent's data to the page.
			 */
			ASSERT(key.offset > 0);
			ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
						  inline_data, size, datal,
						  comp_type);
			goto out;
		}
	} else if (i_size_read(dst) <= datal) {
		struct btrfs_file_extent_item *ei;

		ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
				    struct btrfs_file_extent_item);
		/*
		 * If it's an inline extent replace it with the source inline
		 * extent, otherwise copy the source inline extent data into
		 * the respective page at the destination inode.
		 */
		if (btrfs_file_extent_type(path->nodes[0], ei) ==
		    BTRFS_FILE_EXTENT_INLINE)
			goto copy_inline_extent;

		ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
					  inline_data, size, datal, comp_type);
		goto out;
	}

copy_inline_extent:
	ret = 0;
	/*
	 * We have no extent items, or we have an extent at offset 0 which may
	 * or may not be inlined. All these cases are dealt with in the same way.
	 */
	if (i_size_read(dst) > datal) {
		/*
		 * At the destination offset 0 we have either a hole, a regular
		 * extent or an inline extent larger than the one we want to
		 * clone. Deal with all these cases by copying the inline extent
		 * data into the respective page at the destination inode.
		 */
		ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
					  inline_data, size, datal, comp_type);
		goto out;
	}

	btrfs_release_path(path);
	/*
	 * If we end up here it means we are copying the inline extent into a
	 * leaf of the destination inode. We know we will drop or adjust at most
	 * one extent item in the destination root.
	 *
	 * 1 unit - adjusting old extent (we may have to split it)
	 * 1 unit - add new extent
	 * 1 unit - inode update
	 */
	trans = btrfs_start_transaction(root, 3);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		goto out;
	}
	drop_args.path = path;
	drop_args.start = drop_start;
	drop_args.end = aligned_end;
	drop_args.drop_cache = true;
	ret = btrfs_drop_extents(trans, root, BTRFS_I(dst), &drop_args);
	if (ret)
		goto out;
	ret = btrfs_insert_empty_item(trans, root, path, new_key, size);
	if (ret)
		goto out;

	write_extent_buffer(path->nodes[0], inline_data,
			    btrfs_item_ptr_offset(path->nodes[0],
						  path->slots[0]),
			    size);
	btrfs_update_inode_bytes(BTRFS_I(dst), datal, drop_args.bytes_found);
	set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(dst)->runtime_flags);
	ret = btrfs_inode_set_file_extent_range(BTRFS_I(dst), 0, aligned_end);
out:
	if (!ret && !trans) {
		/*
		 * No transaction here means we copied the inline extent into a
		 * page of the destination inode.
		 *
		 * 1 unit to update inode item
		 */
		trans = btrfs_start_transaction(root, 1);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			trans = NULL;
		}
	}
	if (ret && trans) {
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
	}
	if (!ret)
		*trans_out = trans;

	return ret;
}

/*
 * btrfs_clone() - clone a range from one file (inode) to another
 *
 * @src: Inode to clone from
 * @inode: Inode to clone to
 * @off: Offset within source to start clone from
 * @olen: Original length, passed by user, of range to clone
 * @olen_aligned: Block-aligned value of olen
 * @destoff: Offset within @inode to start clone
 * @no_time_update: Whether to update mtime/ctime on the target inode
 */
static int btrfs_clone(struct inode *src, struct inode *inode,
		       const u64 off, const u64 olen, const u64 olen_aligned,
		       const u64 destoff, int no_time_update)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_path *path = NULL;
	struct extent_buffer *leaf;
	struct btrfs_trans_handle *trans;
	char *buf = NULL;
	struct btrfs_key key;
	u32 nritems;
	int slot;
	int ret;
	const u64 len = olen_aligned;
	u64 last_dest_end = destoff;

	ret = -ENOMEM;
	buf = kvmalloc(fs_info->nodesize, GFP_KERNEL);
	if (!buf)
		return ret;

	path = btrfs_alloc_path();
	if (!path) {
		kvfree(buf);
		return ret;
	}

	path->reada = READA_FORWARD;
	/* Clone data */
	key.objectid = btrfs_ino(BTRFS_I(src));
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = off;

	while (1) {
		u64 next_key_min_offset = key.offset + 1;
		struct btrfs_file_extent_item *extent;
		u64 extent_gen;
		int type;
		u32 size;
		struct btrfs_key new_key;
		u64 disko = 0, diskl = 0;
		u64 datao = 0, datal = 0;
		u8 comp;
		u64 drop_start;

		/* Note the key will change type as we walk through the tree */
		ret = btrfs_search_slot(NULL, BTRFS_I(src)->root, &key, path,
					0, 0);
		if (ret < 0)
			goto out;
		/*
		 * On the first search, if no extent item that starts at offset
		 * off was found but the previous item is an extent item, it is
		 * possible that it overlaps our target range, so process it.
		 */
		if (key.offset == off && ret > 0 && path->slots[0] > 0) {
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0] - 1);
			if (key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}

		nritems = btrfs_header_nritems(path->nodes[0]);
process_slot:
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(BTRFS_I(src)->root, path);
			if (ret < 0)
				goto out;
			if (ret > 0)
				break;
			nritems = btrfs_header_nritems(path->nodes[0]);
		}
		leaf = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.type > BTRFS_EXTENT_DATA_KEY ||
		    key.objectid != btrfs_ino(BTRFS_I(src)))
			break;

		ASSERT(key.type == BTRFS_EXTENT_DATA_KEY);

		extent = btrfs_item_ptr(leaf, slot,
					struct btrfs_file_extent_item);
		extent_gen = btrfs_file_extent_generation(leaf, extent);
		comp = btrfs_file_extent_compression(leaf, extent);
		type = btrfs_file_extent_type(leaf, extent);
		if (type == BTRFS_FILE_EXTENT_REG ||
		    type == BTRFS_FILE_EXTENT_PREALLOC) {
			disko = btrfs_file_extent_disk_bytenr(leaf, extent);
			diskl = btrfs_file_extent_disk_num_bytes(leaf, extent);
			datao = btrfs_file_extent_offset(leaf, extent);
			datal = btrfs_file_extent_num_bytes(leaf, extent);
		} else if (type == BTRFS_FILE_EXTENT_INLINE) {
			/* Take upper bound, may be compressed */
			datal = btrfs_file_extent_ram_bytes(leaf, extent);
		}

		/*
		 * The first search might have left us at an extent item that
		 * ends before our target range's start, which can happen if we
		 * have holes and the NO_HOLES feature enabled.
		 */
		if (key.offset + datal <= off) {
			path->slots[0]++;
			goto process_slot;
		} else if (key.offset >= off + len) {
			break;
		}
		next_key_min_offset = key.offset + datal;
		size = btrfs_item_size_nr(leaf, slot);
		read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf, slot),
				   size);

		btrfs_release_path(path);

		memcpy(&new_key, &key, sizeof(new_key));
		new_key.objectid = btrfs_ino(BTRFS_I(inode));
		if (off <= key.offset)
			new_key.offset = key.offset + destoff - off;
		else
			new_key.offset = destoff;

		/*
		 * Deal with a hole that doesn't have an extent item that
		 * represents it (NO_HOLES feature enabled).
		 * This hole is either in the middle of the cloning range or at
		 * the beginning (fully overlaps it or partially overlaps it).
		 */
		if (new_key.offset != last_dest_end)
			drop_start = last_dest_end;
		else
			drop_start = new_key.offset;

		if (type == BTRFS_FILE_EXTENT_REG ||
		    type == BTRFS_FILE_EXTENT_PREALLOC) {
			struct btrfs_replace_extent_info clone_info;

			/*
			 *    a  | --- range to clone ---|  b
			 * | ------------- extent ------------- |
			 */

			/* Subtract range b */
			if (key.offset + datal > off + len)
				datal = off + len - key.offset;

			/* Subtract range a */
			if (off > key.offset) {
				datao += off - key.offset;
				datal -= off - key.offset;
			}
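			/*
			 * Editorial example (not in the original source): for
			 * an extent at key.offset 0 with datal 16384, cloning
			 * off 4096 and len 8192 first trims range b
			 * (datal = 4096 + 8192 - 0 = 12288) and then range a
			 * (datao += 4096, datal -= 4096), leaving an 8192 byte
			 * slice that starts 4096 bytes into the extent's data.
			 */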

			clone_info.disk_offset = disko;
			clone_info.disk_len = diskl;
			clone_info.data_offset = datao;
			clone_info.data_len = datal;
			clone_info.file_offset = new_key.offset;
			clone_info.extent_buf = buf;
			clone_info.is_new_extent = false;
			ret = btrfs_replace_file_extents(inode, path, drop_start,
					new_key.offset + datal - 1, &clone_info,
					&trans);
			if (ret)
				goto out;
		} else if (type == BTRFS_FILE_EXTENT_INLINE) {
			/*
			 * Inline extents always have to start at file offset 0
			 * and can never be bigger than the sector size. We can
			 * never clone only parts of an inline extent, since all
			 * reflink operations must start at a sector size aligned
			 * offset, and the length must be aligned too or end at
			 * the i_size (which implies the whole inlined data).
			 */
			ASSERT(key.offset == 0);
			ASSERT(datal <= fs_info->sectorsize);
			if (key.offset != 0 || datal > fs_info->sectorsize)
				return -EUCLEAN;

			ret = clone_copy_inline_extent(inode, path, &new_key,
						       drop_start, datal, size,
						       comp, buf, &trans);
			if (ret)
				goto out;
		}

		btrfs_release_path(path);

		/*
		 * If this is a new extent, update the last_reflink_trans of
		 * both inodes. This is used by fsync to make sure it does not
		 * log multiple checksum items with overlapping ranges. For
		 * older extents we don't need to do it since inode logging
		 * skips the checksums for older extents. Also ignore holes and
		 * inline extents because they don't have checksums in the csum
		 * tree.
		 */
		if (extent_gen == trans->transid && disko > 0) {
			BTRFS_I(src)->last_reflink_trans = trans->transid;
			BTRFS_I(inode)->last_reflink_trans = trans->transid;
		}

		last_dest_end = ALIGN(new_key.offset + datal,
				      fs_info->sectorsize);
		ret = clone_finish_inode_update(trans, inode, last_dest_end,
						destoff, olen, no_time_update);
		if (ret)
			goto out;
		if (new_key.offset + datal >= destoff + len)
			break;

		btrfs_release_path(path);
		key.offset = next_key_min_offset;

		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			goto out;
		}

		cond_resched();
	}
	ret = 0;

	if (last_dest_end < destoff + len) {
		/*
		 * We have an implicit hole that fully or partially overlaps our
		 * cloning range at its end. This means that we either have the
		 * NO_HOLES feature enabled or the implicit hole happened due to
		 * mixing buffered and direct IO writes against this file.
		 */
		btrfs_release_path(path);

		ret = btrfs_replace_file_extents(inode, path, last_dest_end,
				destoff + len - 1, NULL, &trans);
		if (ret)
			goto out;

		ret = clone_finish_inode_update(trans, inode, destoff + len,
						destoff, olen, no_time_update);
	}

out:
	btrfs_free_path(path);
	kvfree(buf);
	clear_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &BTRFS_I(inode)->runtime_flags);

	return ret;
}

static void btrfs_double_extent_unlock(struct inode *inode1, u64 loff1,
				       struct inode *inode2, u64 loff2, u64 len)
{
	unlock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1);
	unlock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1);
}

static void btrfs_double_extent_lock(struct inode *inode1, u64 loff1,
				     struct inode *inode2, u64 loff2, u64 len)
{
	if (inode1 < inode2) {
		swap(inode1, inode2);
		swap(loff1, loff2);
	} else if (inode1 == inode2 && loff2 < loff1) {
		swap(loff1, loff2);
	}
	lock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1);
	lock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1);
}

static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 len,
				   struct inode *dst, u64 dst_loff)
{
	const u64 bs = BTRFS_I(src)->root->fs_info->sb->s_blocksize;
	int ret;

	/*
	 * Lock destination range to serialize with concurrent readpages() and
	 * source range to serialize with relocation.
	 */
	btrfs_double_extent_lock(src, loff, dst, dst_loff, len);
	ret = btrfs_clone(src, dst, loff, len, ALIGN(len, bs), dst_loff, 1);
	btrfs_double_extent_unlock(src, loff, dst, dst_loff, len);

	return ret;
}

static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
			     struct inode *dst, u64 dst_loff)
{
	int ret;
	u64 i, tail_len, chunk_count;
	struct btrfs_root *root_dst = BTRFS_I(dst)->root;

	spin_lock(&root_dst->root_item_lock);
	if (root_dst->send_in_progress) {
		btrfs_warn_rl(root_dst->fs_info,
"cannot deduplicate to root %llu while send operations are using it (%d in progress)",
			      root_dst->root_key.objectid,
			      root_dst->send_in_progress);
		spin_unlock(&root_dst->root_item_lock);
		return -EAGAIN;
	}
	root_dst->dedupe_in_progress++;
	spin_unlock(&root_dst->root_item_lock);

	tail_len = olen % BTRFS_MAX_DEDUPE_LEN;
	chunk_count = div_u64(olen, BTRFS_MAX_DEDUPE_LEN);
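	/*
	 * Editorial example (not in the original source): with
	 * BTRFS_MAX_DEDUPE_LEN at 16M, an olen of 40M gives chunk_count = 2
	 * and tail_len = 8M, so the range is deduplicated as two 16M chunks
	 * followed by one 8M tail.
	 */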

	for (i = 0; i < chunk_count; i++) {
		ret = btrfs_extent_same_range(src, loff, BTRFS_MAX_DEDUPE_LEN,
					      dst, dst_loff);
		if (ret)
			goto out;

		loff += BTRFS_MAX_DEDUPE_LEN;
		dst_loff += BTRFS_MAX_DEDUPE_LEN;
	}

	if (tail_len > 0)
		ret = btrfs_extent_same_range(src, loff, tail_len, dst, dst_loff);
out:
	spin_lock(&root_dst->root_item_lock);
	root_dst->dedupe_in_progress--;
	spin_unlock(&root_dst->root_item_lock);

	return ret;
}

static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
				      u64 off, u64 olen, u64 destoff)
{
	struct inode *inode = file_inode(file);
	struct inode *src = file_inode(file_src);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	int ret;
	int wb_ret;
	u64 len = olen;
	u64 bs = fs_info->sb->s_blocksize;

	/*
	 * VFS's generic_remap_file_range_prep() protects us from cloning the
	 * eof block into the middle of a file, which would result in corruption
	 * if the file size is not blocksize aligned. So we don't need to check
	 * for that case here.
	 */
	if (off + len == src->i_size)
		len = ALIGN(src->i_size, bs) - off;
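	/*
	 * Editorial example (not in the original source): with a 4K block
	 * size, a source i_size of 5000 and a clone of the range [4096, 5000),
	 * off + len equals i_size, so len is extended to
	 * ALIGN(5000, 4096) - 4096 = 4096 and the whole tail block is cloned.
	 */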

	if (destoff > inode->i_size) {
		const u64 wb_start = ALIGN_DOWN(inode->i_size, bs);

		ret = btrfs_cont_expand(BTRFS_I(inode), inode->i_size, destoff);
		if (ret)
			return ret;
		/*
		 * We may have truncated the last block if the inode's size is
		 * not sector size aligned, so we need to wait for writeback to
		 * complete before proceeding further, otherwise we can race
		 * with cloning and attempt to increment a reference to an
		 * extent that no longer exists (writeback completed right after
		 * we found the previous extent covering eof and before we
		 * attempted to increment its reference count).
		 */
		ret = btrfs_wait_ordered_range(inode, wb_start,
					       destoff - wb_start);
		if (ret)
			return ret;
	}

	/*
	 * Lock destination range to serialize with concurrent readpages() and
	 * source range to serialize with relocation.
	 */
	btrfs_double_extent_lock(src, off, inode, destoff, len);
	ret = btrfs_clone(src, inode, off, olen, len, destoff, 0);
	btrfs_double_extent_unlock(src, off, inode, destoff, len);

	/*
	 * We may have copied an inline extent into a page of the destination
	 * range, so wait for writeback to complete before truncating pages
	 * from the page cache. This is a rare case.
	 */
	wb_ret = btrfs_wait_ordered_range(inode, destoff, len);
	ret = ret ? ret : wb_ret;
	/*
	 * Truncate page cache pages so that future reads will see the cloned
	 * data immediately and not the previous data.
	 */
	truncate_inode_pages_range(&inode->i_data,
				   round_down(destoff, PAGE_SIZE),
				   round_up(destoff + len, PAGE_SIZE) - 1);

	return ret;
}

static int btrfs_remap_file_range_prep(struct file *file_in, loff_t pos_in,
				       struct file *file_out, loff_t pos_out,
				       loff_t *len, unsigned int remap_flags)
{
	struct inode *inode_in = file_inode(file_in);
	struct inode *inode_out = file_inode(file_out);
	u64 bs = BTRFS_I(inode_out)->root->fs_info->sb->s_blocksize;
	bool same_inode = inode_out == inode_in;
	u64 wb_len;
	int ret;

	if (!(remap_flags & REMAP_FILE_DEDUP)) {
		struct btrfs_root *root_out = BTRFS_I(inode_out)->root;

		if (btrfs_root_readonly(root_out))
			return -EROFS;

		if (file_in->f_path.mnt != file_out->f_path.mnt ||
		    inode_in->i_sb != inode_out->i_sb)
			return -EXDEV;
	}

	/* Don't make the dst file partly checksummed */
	if ((BTRFS_I(inode_in)->flags & BTRFS_INODE_NODATASUM) !=
	    (BTRFS_I(inode_out)->flags & BTRFS_INODE_NODATASUM)) {
		return -EINVAL;
	}

	/*
	 * Now that the inodes are locked, we need to start writeback ourselves
	 * and can not rely on the writeback from the VFS's generic helper
	 * generic_remap_file_range_prep() because:
	 *
	 * 1) For compression we must call filemap_fdatawrite_range() twice
	 *    (btrfs_fdatawrite_range() does it for us), and the generic helper
	 *    only calls it once;
	 *
	 * 2) filemap_fdatawrite_range(), called by the generic helper, only
	 *    waits for the writeback to complete, i.e. for IO to be done, and
	 *    not for the ordered extents to complete. We need to wait for them
	 *    to complete so that new file extent items are in the fs tree.
	 */
	if (*len == 0 && !(remap_flags & REMAP_FILE_DEDUP))
		wb_len = ALIGN(inode_in->i_size, bs) - ALIGN_DOWN(pos_in, bs);
	else
		wb_len = ALIGN(*len, bs);
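	/*
	 * Editorial example (not in the original source): for a whole-file
	 * clone request (*len == 0) with pos_in 6000, an i_size of 10000 and
	 * a 4K block size, wb_len covers
	 * ALIGN(10000, 4096) - ALIGN_DOWN(6000, 4096) = 12288 - 4096 = 8192
	 * bytes.
	 */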

	/*
	 * Since we don't lock ranges, wait for ongoing lockless dio writes (as
	 * any in progress could create its ordered extents after we wait for
	 * existing ordered extents below).
	 */
	inode_dio_wait(inode_in);
	if (!same_inode)
		inode_dio_wait(inode_out);

	/*
	 * Workaround to make sure NOCOW buffered writes reach disk as NOCOW.
	 *
	 * Btrfs' back references do not have a block level granularity, they
	 * work at the whole extent level.
	 * A NOCOW buffered write without data space reserved may not be able
	 * to fall back to CoW due to lack of data space, and thus could cause
	 * data loss.
	 *
	 * Here we take a shortcut by flushing the whole inode, so that all
	 * NOCOW writes reach disk as NOCOW before we increase the reference
	 * count of the extents. We could do better by only flushing NOCOW
	 * data, but that needs extra accounting.
	 *
	 * Also we don't need to check ASYNC_EXTENT, as async extents will be
	 * CoWed anyway and so do not affect the NOCOW part.
	 */
	ret = filemap_flush(inode_in->i_mapping);
	if (ret < 0)
		return ret;

	ret = btrfs_wait_ordered_range(inode_in, ALIGN_DOWN(pos_in, bs),
				       wb_len);
	if (ret < 0)
		return ret;
	ret = btrfs_wait_ordered_range(inode_out, ALIGN_DOWN(pos_out, bs),
				       wb_len);
	if (ret < 0)
		return ret;

	return generic_remap_file_range_prep(file_in, pos_in, file_out, pos_out,
					     len, remap_flags);
}

loff_t btrfs_remap_file_range(struct file *src_file, loff_t off,
			      struct file *dst_file, loff_t destoff, loff_t len,
			      unsigned int remap_flags)
{
	struct inode *src_inode = file_inode(src_file);
	struct inode *dst_inode = file_inode(dst_file);
	bool same_inode = dst_inode == src_inode;
	int ret;

	if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
		return -EINVAL;

	if (same_inode)
		inode_lock(src_inode);
	else
		lock_two_nondirectories(src_inode, dst_inode);

	ret = btrfs_remap_file_range_prep(src_file, off, dst_file, destoff,
					  &len, remap_flags);
	if (ret < 0 || len == 0)
		goto out_unlock;

	if (remap_flags & REMAP_FILE_DEDUP)
		ret = btrfs_extent_same(src_inode, off, len, dst_inode, destoff);
	else
		ret = btrfs_clone_files(dst_file, src_file, off, len, destoff);

out_unlock:
	if (same_inode)
		inode_unlock(src_inode);
	else
		unlock_two_nondirectories(src_inode, dst_inode);

	return ret < 0 ? ret : len;
}
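
/*
 * Editorial usage sketch (not part of the original file): the entry point
 * above is reached from userspace through the VFS remap interfaces, for
 * example the FICLONERANGE ioctl. The file descriptors and the 64K length
 * below are illustrative.
 *
 *	#include <linux/fs.h>
 *	#include <sys/ioctl.h>
 *
 *	struct file_clone_range args = {
 *		.src_fd = src_fd,	// source file descriptor
 *		.src_offset = 0,	// must be block size aligned
 *		.src_length = 65536,	// 0 means "up to the end of the source"
 *		.dest_offset = 0,
 *	};
 *	if (ioctl(dst_fd, FICLONERANGE, &args) == -1)
 *		perror("FICLONERANGE");
 */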