1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2012 Alexander Block. All rights reserved.
6 #include <linux/bsearch.h>
8 #include <linux/file.h>
9 #include <linux/sort.h>
10 #include <linux/mount.h>
11 #include <linux/xattr.h>
12 #include <linux/posix_acl_xattr.h>
13 #include <linux/radix-tree.h>
14 #include <linux/vmalloc.h>
15 #include <linux/string.h>
16 #include <linux/compat.h>
17 #include <linux/crc32c.h>
23 #include "btrfs_inode.h"
24 #include "transaction.h"
25 #include "compression.h"
28 * A fs_path is a helper to dynamically build path names with unknown size.
29 * It reallocates the internal buffer on demand.
30 * It allows fast adding of path elements on the right side (normal path) and
31 * fast adding to the left side (reversed path). A reversed path can also be
32 * unreversed if needed.
41 unsigned short buf_len
:15;
42 unsigned short reversed
:1;
46 * Average path length does not exceed 200 bytes, we'll have
47 * better packing in the slab and higher chance to satisfy
48 * a allocation later during send.
53 #define FS_PATH_INLINE_SIZE \
54 (sizeof(struct fs_path) - offsetof(struct fs_path, inline_buf))
57 /* reused for each extent */
59 struct btrfs_root
*root
;
66 #define SEND_CTX_MAX_NAME_CACHE_SIZE 128
67 #define SEND_CTX_NAME_CACHE_CLEAN_SIZE (SEND_CTX_MAX_NAME_CACHE_SIZE * 2)
70 struct file
*send_filp
;
76 u64 cmd_send_size
[BTRFS_SEND_C_MAX
+ 1];
77 u64 flags
; /* 'flags' member of btrfs_ioctl_send_args is u64 */
79 struct btrfs_root
*send_root
;
80 struct btrfs_root
*parent_root
;
81 struct clone_root
*clone_roots
;
84 /* current state of the compare_tree call */
85 struct btrfs_path
*left_path
;
86 struct btrfs_path
*right_path
;
87 struct btrfs_key
*cmp_key
;
90 * infos of the currently processed inode. In case of deleted inodes,
91 * these are the values from the deleted inode.
96 int cur_inode_new_gen
;
97 int cur_inode_deleted
;
101 u64 cur_inode_last_extent
;
102 u64 cur_inode_next_write_offset
;
103 bool ignore_cur_inode
;
107 struct list_head new_refs
;
108 struct list_head deleted_refs
;
110 struct radix_tree_root name_cache
;
111 struct list_head name_cache_list
;
114 struct file_ra_state ra
;
119 * We process inodes by their increasing order, so if before an
120 * incremental send we reverse the parent/child relationship of
121 * directories such that a directory with a lower inode number was
122 * the parent of a directory with a higher inode number, and the one
123 * becoming the new parent got renamed too, we can't rename/move the
124 * directory with lower inode number when we finish processing it - we
125 * must process the directory with higher inode number first, then
126 * rename/move it and then rename/move the directory with lower inode
127 * number. Example follows.
129 * Tree state when the first send was performed:
141 * Tree state when the second (incremental) send is performed:
150 * The sequence of steps that lead to the second state was:
152 * mv /a/b/c/d /a/b/c2/d2
153 * mv /a/b/c /a/b/c2/d2/cc
155 * "c" has lower inode number, but we can't move it (2nd mv operation)
156 * before we move "d", which has higher inode number.
158 * So we just memorize which move/rename operations must be performed
159 * later when their respective parent is processed and moved/renamed.
162 /* Indexed by parent directory inode number. */
163 struct rb_root pending_dir_moves
;
166 * Reverse index, indexed by the inode number of a directory that
167 * is waiting for the move/rename of its immediate parent before its
168 * own move/rename can be performed.
170 struct rb_root waiting_dir_moves
;
173 * A directory that is going to be rm'ed might have a child directory
174 * which is in the pending directory moves index above. In this case,
175 * the directory can only be removed after the move/rename of its child
176 * is performed. Example:
196 * Sequence of steps that lead to the send snapshot:
197 * rm -f /a/b/c/foo.txt
199 * mv /a/b/c/x /a/b/YY
202 * When the child is processed, its move/rename is delayed until its
203 * parent is processed (as explained above), but all other operations
204 * like update utimes, chown, chgrp, etc, are performed and the paths
205 * that it uses for those operations must use the orphanized name of
206 * its parent (the directory we're going to rm later), so we need to
207 * memorize that name.
209 * Indexed by the inode number of the directory to be deleted.
211 struct rb_root orphan_dirs
;
214 struct pending_dir_move
{
216 struct list_head list
;
220 struct list_head update_refs
;
223 struct waiting_dir_move
{
227 * There might be some directory that could not be removed because it
228 * was waiting for this directory inode to be moved first. Therefore
229 * after this directory is moved, we can try to rmdir the ino rmdir_ino.
235 struct orphan_dir_info
{
239 u64 last_dir_index_offset
;
242 struct name_cache_entry
{
243 struct list_head list
;
245 * radix_tree has only 32bit entries but we need to handle 64bit inums.
246 * We use the lower 32bit of the 64bit inum to store it in the tree. If
247 * more then one inum would fall into the same entry, we use radix_list
248 * to store the additional entries. radix_list is also used to store
249 * entries where two entries have the same inum but different
252 struct list_head radix_list
;
258 int need_later_update
;
264 static void inconsistent_snapshot_error(struct send_ctx
*sctx
,
265 enum btrfs_compare_tree_result result
,
268 const char *result_string
;
271 case BTRFS_COMPARE_TREE_NEW
:
272 result_string
= "new";
274 case BTRFS_COMPARE_TREE_DELETED
:
275 result_string
= "deleted";
277 case BTRFS_COMPARE_TREE_CHANGED
:
278 result_string
= "updated";
280 case BTRFS_COMPARE_TREE_SAME
:
282 result_string
= "unchanged";
286 result_string
= "unexpected";
289 btrfs_err(sctx
->send_root
->fs_info
,
290 "Send: inconsistent snapshot, found %s %s for inode %llu without updated inode item, send root is %llu, parent root is %llu",
291 result_string
, what
, sctx
->cmp_key
->objectid
,
292 sctx
->send_root
->root_key
.objectid
,
294 sctx
->parent_root
->root_key
.objectid
: 0));
297 static int is_waiting_for_move(struct send_ctx
*sctx
, u64 ino
);
299 static struct waiting_dir_move
*
300 get_waiting_dir_move(struct send_ctx
*sctx
, u64 ino
);
302 static int is_waiting_for_rm(struct send_ctx
*sctx
, u64 dir_ino
);
304 static int need_send_hole(struct send_ctx
*sctx
)
306 return (sctx
->parent_root
&& !sctx
->cur_inode_new
&&
307 !sctx
->cur_inode_new_gen
&& !sctx
->cur_inode_deleted
&&
308 S_ISREG(sctx
->cur_inode_mode
));
311 static void fs_path_reset(struct fs_path
*p
)
314 p
->start
= p
->buf
+ p
->buf_len
- 1;
324 static struct fs_path
*fs_path_alloc(void)
328 p
= kmalloc(sizeof(*p
), GFP_KERNEL
);
332 p
->buf
= p
->inline_buf
;
333 p
->buf_len
= FS_PATH_INLINE_SIZE
;
338 static struct fs_path
*fs_path_alloc_reversed(void)
350 static void fs_path_free(struct fs_path
*p
)
354 if (p
->buf
!= p
->inline_buf
)
359 static int fs_path_len(struct fs_path
*p
)
361 return p
->end
- p
->start
;
364 static int fs_path_ensure_buf(struct fs_path
*p
, int len
)
372 if (p
->buf_len
>= len
)
375 if (len
> PATH_MAX
) {
380 path_len
= p
->end
- p
->start
;
381 old_buf_len
= p
->buf_len
;
384 * First time the inline_buf does not suffice
386 if (p
->buf
== p
->inline_buf
) {
387 tmp_buf
= kmalloc(len
, GFP_KERNEL
);
389 memcpy(tmp_buf
, p
->buf
, old_buf_len
);
391 tmp_buf
= krealloc(p
->buf
, len
, GFP_KERNEL
);
397 * The real size of the buffer is bigger, this will let the fast path
398 * happen most of the time
400 p
->buf_len
= ksize(p
->buf
);
403 tmp_buf
= p
->buf
+ old_buf_len
- path_len
- 1;
404 p
->end
= p
->buf
+ p
->buf_len
- 1;
405 p
->start
= p
->end
- path_len
;
406 memmove(p
->start
, tmp_buf
, path_len
+ 1);
409 p
->end
= p
->start
+ path_len
;
414 static int fs_path_prepare_for_add(struct fs_path
*p
, int name_len
,
420 new_len
= p
->end
- p
->start
+ name_len
;
421 if (p
->start
!= p
->end
)
423 ret
= fs_path_ensure_buf(p
, new_len
);
428 if (p
->start
!= p
->end
)
430 p
->start
-= name_len
;
431 *prepared
= p
->start
;
433 if (p
->start
!= p
->end
)
444 static int fs_path_add(struct fs_path
*p
, const char *name
, int name_len
)
449 ret
= fs_path_prepare_for_add(p
, name_len
, &prepared
);
452 memcpy(prepared
, name
, name_len
);
458 static int fs_path_add_path(struct fs_path
*p
, struct fs_path
*p2
)
463 ret
= fs_path_prepare_for_add(p
, p2
->end
- p2
->start
, &prepared
);
466 memcpy(prepared
, p2
->start
, p2
->end
- p2
->start
);
472 static int fs_path_add_from_extent_buffer(struct fs_path
*p
,
473 struct extent_buffer
*eb
,
474 unsigned long off
, int len
)
479 ret
= fs_path_prepare_for_add(p
, len
, &prepared
);
483 read_extent_buffer(eb
, prepared
, off
, len
);
489 static int fs_path_copy(struct fs_path
*p
, struct fs_path
*from
)
493 p
->reversed
= from
->reversed
;
496 ret
= fs_path_add_path(p
, from
);
502 static void fs_path_unreverse(struct fs_path
*p
)
511 len
= p
->end
- p
->start
;
513 p
->end
= p
->start
+ len
;
514 memmove(p
->start
, tmp
, len
+ 1);
518 static struct btrfs_path
*alloc_path_for_send(void)
520 struct btrfs_path
*path
;
522 path
= btrfs_alloc_path();
525 path
->search_commit_root
= 1;
526 path
->skip_locking
= 1;
527 path
->need_commit_sem
= 1;
531 static int write_buf(struct file
*filp
, const void *buf
, u32 len
, loff_t
*off
)
537 ret
= kernel_write(filp
, buf
+ pos
, len
- pos
, off
);
538 /* TODO handle that correctly */
539 /*if (ret == -ERESTARTSYS) {
553 static int tlv_put(struct send_ctx
*sctx
, u16 attr
, const void *data
, int len
)
555 struct btrfs_tlv_header
*hdr
;
556 int total_len
= sizeof(*hdr
) + len
;
557 int left
= sctx
->send_max_size
- sctx
->send_size
;
559 if (unlikely(left
< total_len
))
562 hdr
= (struct btrfs_tlv_header
*) (sctx
->send_buf
+ sctx
->send_size
);
563 hdr
->tlv_type
= cpu_to_le16(attr
);
564 hdr
->tlv_len
= cpu_to_le16(len
);
565 memcpy(hdr
+ 1, data
, len
);
566 sctx
->send_size
+= total_len
;
571 #define TLV_PUT_DEFINE_INT(bits) \
572 static int tlv_put_u##bits(struct send_ctx *sctx, \
573 u##bits attr, u##bits value) \
575 __le##bits __tmp = cpu_to_le##bits(value); \
576 return tlv_put(sctx, attr, &__tmp, sizeof(__tmp)); \
579 TLV_PUT_DEFINE_INT(64)
581 static int tlv_put_string(struct send_ctx
*sctx
, u16 attr
,
582 const char *str
, int len
)
586 return tlv_put(sctx
, attr
, str
, len
);
589 static int tlv_put_uuid(struct send_ctx
*sctx
, u16 attr
,
592 return tlv_put(sctx
, attr
, uuid
, BTRFS_UUID_SIZE
);
595 static int tlv_put_btrfs_timespec(struct send_ctx
*sctx
, u16 attr
,
596 struct extent_buffer
*eb
,
597 struct btrfs_timespec
*ts
)
599 struct btrfs_timespec bts
;
600 read_extent_buffer(eb
, &bts
, (unsigned long)ts
, sizeof(bts
));
601 return tlv_put(sctx
, attr
, &bts
, sizeof(bts
));
605 #define TLV_PUT(sctx, attrtype, data, attrlen) \
607 ret = tlv_put(sctx, attrtype, data, attrlen); \
609 goto tlv_put_failure; \
612 #define TLV_PUT_INT(sctx, attrtype, bits, value) \
614 ret = tlv_put_u##bits(sctx, attrtype, value); \
616 goto tlv_put_failure; \
619 #define TLV_PUT_U8(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 8, data)
620 #define TLV_PUT_U16(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 16, data)
621 #define TLV_PUT_U32(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 32, data)
622 #define TLV_PUT_U64(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 64, data)
623 #define TLV_PUT_STRING(sctx, attrtype, str, len) \
625 ret = tlv_put_string(sctx, attrtype, str, len); \
627 goto tlv_put_failure; \
629 #define TLV_PUT_PATH(sctx, attrtype, p) \
631 ret = tlv_put_string(sctx, attrtype, p->start, \
632 p->end - p->start); \
634 goto tlv_put_failure; \
636 #define TLV_PUT_UUID(sctx, attrtype, uuid) \
638 ret = tlv_put_uuid(sctx, attrtype, uuid); \
640 goto tlv_put_failure; \
642 #define TLV_PUT_BTRFS_TIMESPEC(sctx, attrtype, eb, ts) \
644 ret = tlv_put_btrfs_timespec(sctx, attrtype, eb, ts); \
646 goto tlv_put_failure; \
649 static int send_header(struct send_ctx
*sctx
)
651 struct btrfs_stream_header hdr
;
653 strcpy(hdr
.magic
, BTRFS_SEND_STREAM_MAGIC
);
654 hdr
.version
= cpu_to_le32(BTRFS_SEND_STREAM_VERSION
);
656 return write_buf(sctx
->send_filp
, &hdr
, sizeof(hdr
),
661 * For each command/item we want to send to userspace, we call this function.
663 static int begin_cmd(struct send_ctx
*sctx
, int cmd
)
665 struct btrfs_cmd_header
*hdr
;
667 if (WARN_ON(!sctx
->send_buf
))
670 BUG_ON(sctx
->send_size
);
672 sctx
->send_size
+= sizeof(*hdr
);
673 hdr
= (struct btrfs_cmd_header
*)sctx
->send_buf
;
674 hdr
->cmd
= cpu_to_le16(cmd
);
679 static int send_cmd(struct send_ctx
*sctx
)
682 struct btrfs_cmd_header
*hdr
;
685 hdr
= (struct btrfs_cmd_header
*)sctx
->send_buf
;
686 hdr
->len
= cpu_to_le32(sctx
->send_size
- sizeof(*hdr
));
689 crc
= btrfs_crc32c(0, (unsigned char *)sctx
->send_buf
, sctx
->send_size
);
690 hdr
->crc
= cpu_to_le32(crc
);
692 ret
= write_buf(sctx
->send_filp
, sctx
->send_buf
, sctx
->send_size
,
695 sctx
->total_send_size
+= sctx
->send_size
;
696 sctx
->cmd_send_size
[le16_to_cpu(hdr
->cmd
)] += sctx
->send_size
;
703 * Sends a move instruction to user space
705 static int send_rename(struct send_ctx
*sctx
,
706 struct fs_path
*from
, struct fs_path
*to
)
708 struct btrfs_fs_info
*fs_info
= sctx
->send_root
->fs_info
;
711 btrfs_debug(fs_info
, "send_rename %s -> %s", from
->start
, to
->start
);
713 ret
= begin_cmd(sctx
, BTRFS_SEND_C_RENAME
);
717 TLV_PUT_PATH(sctx
, BTRFS_SEND_A_PATH
, from
);
718 TLV_PUT_PATH(sctx
, BTRFS_SEND_A_PATH_TO
, to
);
720 ret
= send_cmd(sctx
);
728 * Sends a link instruction to user space
730 static int send_link(struct send_ctx
*sctx
,
731 struct fs_path
*path
, struct fs_path
*lnk
)
733 struct btrfs_fs_info
*fs_info
= sctx
->send_root
->fs_info
;
736 btrfs_debug(fs_info
, "send_link %s -> %s", path
->start
, lnk
->start
);
738 ret
= begin_cmd(sctx
, BTRFS_SEND_C_LINK
);
742 TLV_PUT_PATH(sctx
, BTRFS_SEND_A_PATH
, path
);
743 TLV_PUT_PATH(sctx
, BTRFS_SEND_A_PATH_LINK
, lnk
);
745 ret
= send_cmd(sctx
);
753 * Sends an unlink instruction to user space
755 static int send_unlink(struct send_ctx
*sctx
, struct fs_path
*path
)
757 struct btrfs_fs_info
*fs_info
= sctx
->send_root
->fs_info
;
760 btrfs_debug(fs_info
, "send_unlink %s", path
->start
);
762 ret
= begin_cmd(sctx
, BTRFS_SEND_C_UNLINK
);
766 TLV_PUT_PATH(sctx
, BTRFS_SEND_A_PATH
, path
);
768 ret
= send_cmd(sctx
);
776 * Sends a rmdir instruction to user space
778 static int send_rmdir(struct send_ctx
*sctx
, struct fs_path
*path
)
780 struct btrfs_fs_info
*fs_info
= sctx
->send_root
->fs_info
;
783 btrfs_debug(fs_info
, "send_rmdir %s", path
->start
);
785 ret
= begin_cmd(sctx
, BTRFS_SEND_C_RMDIR
);
789 TLV_PUT_PATH(sctx
, BTRFS_SEND_A_PATH
, path
);
791 ret
= send_cmd(sctx
);
799 * Helper function to retrieve some fields from an inode item.
801 static int __get_inode_info(struct btrfs_root
*root
, struct btrfs_path
*path
,
802 u64 ino
, u64
*size
, u64
*gen
, u64
*mode
, u64
*uid
,
806 struct btrfs_inode_item
*ii
;
807 struct btrfs_key key
;
810 key
.type
= BTRFS_INODE_ITEM_KEY
;
812 ret
= btrfs_search_slot(NULL
, root
, &key
, path
, 0, 0);
819 ii
= btrfs_item_ptr(path
->nodes
[0], path
->slots
[0],
820 struct btrfs_inode_item
);
822 *size
= btrfs_inode_size(path
->nodes
[0], ii
);
824 *gen
= btrfs_inode_generation(path
->nodes
[0], ii
);
826 *mode
= btrfs_inode_mode(path
->nodes
[0], ii
);
828 *uid
= btrfs_inode_uid(path
->nodes
[0], ii
);
830 *gid
= btrfs_inode_gid(path
->nodes
[0], ii
);
832 *rdev
= btrfs_inode_rdev(path
->nodes
[0], ii
);
837 static int get_inode_info(struct btrfs_root
*root
,
838 u64 ino
, u64
*size
, u64
*gen
,
839 u64
*mode
, u64
*uid
, u64
*gid
,
842 struct btrfs_path
*path
;
845 path
= alloc_path_for_send();
848 ret
= __get_inode_info(root
, path
, ino
, size
, gen
, mode
, uid
, gid
,
850 btrfs_free_path(path
);
854 typedef int (*iterate_inode_ref_t
)(int num
, u64 dir
, int index
,
859 * Helper function to iterate the entries in ONE btrfs_inode_ref or
860 * btrfs_inode_extref.
861 * The iterate callback may return a non zero value to stop iteration. This can
862 * be a negative value for error codes or 1 to simply stop it.
864 * path must point to the INODE_REF or INODE_EXTREF when called.
866 static int iterate_inode_ref(struct btrfs_root
*root
, struct btrfs_path
*path
,
867 struct btrfs_key
*found_key
, int resolve
,
868 iterate_inode_ref_t iterate
, void *ctx
)
870 struct extent_buffer
*eb
= path
->nodes
[0];
871 struct btrfs_item
*item
;
872 struct btrfs_inode_ref
*iref
;
873 struct btrfs_inode_extref
*extref
;
874 struct btrfs_path
*tmp_path
;
878 int slot
= path
->slots
[0];
885 unsigned long name_off
;
886 unsigned long elem_size
;
889 p
= fs_path_alloc_reversed();
893 tmp_path
= alloc_path_for_send();
900 if (found_key
->type
== BTRFS_INODE_REF_KEY
) {
901 ptr
= (unsigned long)btrfs_item_ptr(eb
, slot
,
902 struct btrfs_inode_ref
);
903 item
= btrfs_item_nr(slot
);
904 total
= btrfs_item_size(eb
, item
);
905 elem_size
= sizeof(*iref
);
907 ptr
= btrfs_item_ptr_offset(eb
, slot
);
908 total
= btrfs_item_size_nr(eb
, slot
);
909 elem_size
= sizeof(*extref
);
912 while (cur
< total
) {
915 if (found_key
->type
== BTRFS_INODE_REF_KEY
) {
916 iref
= (struct btrfs_inode_ref
*)(ptr
+ cur
);
917 name_len
= btrfs_inode_ref_name_len(eb
, iref
);
918 name_off
= (unsigned long)(iref
+ 1);
919 index
= btrfs_inode_ref_index(eb
, iref
);
920 dir
= found_key
->offset
;
922 extref
= (struct btrfs_inode_extref
*)(ptr
+ cur
);
923 name_len
= btrfs_inode_extref_name_len(eb
, extref
);
924 name_off
= (unsigned long)&extref
->name
;
925 index
= btrfs_inode_extref_index(eb
, extref
);
926 dir
= btrfs_inode_extref_parent(eb
, extref
);
930 start
= btrfs_ref_to_path(root
, tmp_path
, name_len
,
934 ret
= PTR_ERR(start
);
937 if (start
< p
->buf
) {
938 /* overflow , try again with larger buffer */
939 ret
= fs_path_ensure_buf(p
,
940 p
->buf_len
+ p
->buf
- start
);
943 start
= btrfs_ref_to_path(root
, tmp_path
,
948 ret
= PTR_ERR(start
);
951 BUG_ON(start
< p
->buf
);
955 ret
= fs_path_add_from_extent_buffer(p
, eb
, name_off
,
961 cur
+= elem_size
+ name_len
;
962 ret
= iterate(num
, dir
, index
, p
, ctx
);
969 btrfs_free_path(tmp_path
);
974 typedef int (*iterate_dir_item_t
)(int num
, struct btrfs_key
*di_key
,
975 const char *name
, int name_len
,
976 const char *data
, int data_len
,
980 * Helper function to iterate the entries in ONE btrfs_dir_item.
981 * The iterate callback may return a non zero value to stop iteration. This can
982 * be a negative value for error codes or 1 to simply stop it.
984 * path must point to the dir item when called.
986 static int iterate_dir_item(struct btrfs_root
*root
, struct btrfs_path
*path
,
987 iterate_dir_item_t iterate
, void *ctx
)
990 struct extent_buffer
*eb
;
991 struct btrfs_item
*item
;
992 struct btrfs_dir_item
*di
;
993 struct btrfs_key di_key
;
1006 * Start with a small buffer (1 page). If later we end up needing more
1007 * space, which can happen for xattrs on a fs with a leaf size greater
1008 * then the page size, attempt to increase the buffer. Typically xattr
1012 buf
= kmalloc(buf_len
, GFP_KERNEL
);
1018 eb
= path
->nodes
[0];
1019 slot
= path
->slots
[0];
1020 item
= btrfs_item_nr(slot
);
1021 di
= btrfs_item_ptr(eb
, slot
, struct btrfs_dir_item
);
1024 total
= btrfs_item_size(eb
, item
);
1027 while (cur
< total
) {
1028 name_len
= btrfs_dir_name_len(eb
, di
);
1029 data_len
= btrfs_dir_data_len(eb
, di
);
1030 type
= btrfs_dir_type(eb
, di
);
1031 btrfs_dir_item_key_to_cpu(eb
, di
, &di_key
);
1033 if (type
== BTRFS_FT_XATTR
) {
1034 if (name_len
> XATTR_NAME_MAX
) {
1035 ret
= -ENAMETOOLONG
;
1038 if (name_len
+ data_len
>
1039 BTRFS_MAX_XATTR_SIZE(root
->fs_info
)) {
1047 if (name_len
+ data_len
> PATH_MAX
) {
1048 ret
= -ENAMETOOLONG
;
1053 if (name_len
+ data_len
> buf_len
) {
1054 buf_len
= name_len
+ data_len
;
1055 if (is_vmalloc_addr(buf
)) {
1059 char *tmp
= krealloc(buf
, buf_len
,
1060 GFP_KERNEL
| __GFP_NOWARN
);
1067 buf
= kvmalloc(buf_len
, GFP_KERNEL
);
1075 read_extent_buffer(eb
, buf
, (unsigned long)(di
+ 1),
1076 name_len
+ data_len
);
1078 len
= sizeof(*di
) + name_len
+ data_len
;
1079 di
= (struct btrfs_dir_item
*)((char *)di
+ len
);
1082 ret
= iterate(num
, &di_key
, buf
, name_len
, buf
+ name_len
,
1083 data_len
, type
, ctx
);
1099 static int __copy_first_ref(int num
, u64 dir
, int index
,
1100 struct fs_path
*p
, void *ctx
)
1103 struct fs_path
*pt
= ctx
;
1105 ret
= fs_path_copy(pt
, p
);
1109 /* we want the first only */
1114 * Retrieve the first path of an inode. If an inode has more then one
1115 * ref/hardlink, this is ignored.
1117 static int get_inode_path(struct btrfs_root
*root
,
1118 u64 ino
, struct fs_path
*path
)
1121 struct btrfs_key key
, found_key
;
1122 struct btrfs_path
*p
;
1124 p
= alloc_path_for_send();
1128 fs_path_reset(path
);
1131 key
.type
= BTRFS_INODE_REF_KEY
;
1134 ret
= btrfs_search_slot_for_read(root
, &key
, p
, 1, 0);
1141 btrfs_item_key_to_cpu(p
->nodes
[0], &found_key
, p
->slots
[0]);
1142 if (found_key
.objectid
!= ino
||
1143 (found_key
.type
!= BTRFS_INODE_REF_KEY
&&
1144 found_key
.type
!= BTRFS_INODE_EXTREF_KEY
)) {
1149 ret
= iterate_inode_ref(root
, p
, &found_key
, 1,
1150 __copy_first_ref
, path
);
1160 struct backref_ctx
{
1161 struct send_ctx
*sctx
;
1163 /* number of total found references */
1167 * used for clones found in send_root. clones found behind cur_objectid
1168 * and cur_offset are not considered as allowed clones.
1173 /* may be truncated in case it's the last extent in a file */
1176 /* data offset in the file extent item */
1179 /* Just to check for bugs in backref resolving */
1183 static int __clone_root_cmp_bsearch(const void *key
, const void *elt
)
1185 u64 root
= (u64
)(uintptr_t)key
;
1186 struct clone_root
*cr
= (struct clone_root
*)elt
;
1188 if (root
< cr
->root
->root_key
.objectid
)
1190 if (root
> cr
->root
->root_key
.objectid
)
1195 static int __clone_root_cmp_sort(const void *e1
, const void *e2
)
1197 struct clone_root
*cr1
= (struct clone_root
*)e1
;
1198 struct clone_root
*cr2
= (struct clone_root
*)e2
;
1200 if (cr1
->root
->root_key
.objectid
< cr2
->root
->root_key
.objectid
)
1202 if (cr1
->root
->root_key
.objectid
> cr2
->root
->root_key
.objectid
)
1208 * Called for every backref that is found for the current extent.
1209 * Results are collected in sctx->clone_roots->ino/offset/found_refs
1211 static int __iterate_backrefs(u64 ino
, u64 offset
, u64 root
, void *ctx_
)
1213 struct backref_ctx
*bctx
= ctx_
;
1214 struct clone_root
*found
;
1216 /* First check if the root is in the list of accepted clone sources */
1217 found
= bsearch((void *)(uintptr_t)root
, bctx
->sctx
->clone_roots
,
1218 bctx
->sctx
->clone_roots_cnt
,
1219 sizeof(struct clone_root
),
1220 __clone_root_cmp_bsearch
);
1224 if (found
->root
== bctx
->sctx
->send_root
&&
1225 ino
== bctx
->cur_objectid
&&
1226 offset
== bctx
->cur_offset
) {
1227 bctx
->found_itself
= 1;
1231 * Make sure we don't consider clones from send_root that are
1232 * behind the current inode/offset.
1234 if (found
->root
== bctx
->sctx
->send_root
) {
1236 * TODO for the moment we don't accept clones from the inode
1237 * that is currently send. We may change this when
1238 * BTRFS_IOC_CLONE_RANGE supports cloning from and to the same
1241 if (ino
>= bctx
->cur_objectid
)
1246 found
->found_refs
++;
1247 if (ino
< found
->ino
) {
1249 found
->offset
= offset
;
1250 } else if (found
->ino
== ino
) {
1252 * same extent found more then once in the same file.
1254 if (found
->offset
> offset
+ bctx
->extent_len
)
1255 found
->offset
= offset
;
1262 * Given an inode, offset and extent item, it finds a good clone for a clone
1263 * instruction. Returns -ENOENT when none could be found. The function makes
1264 * sure that the returned clone is usable at the point where sending is at the
1265 * moment. This means, that no clones are accepted which lie behind the current
1268 * path must point to the extent item when called.
1270 static int find_extent_clone(struct send_ctx
*sctx
,
1271 struct btrfs_path
*path
,
1272 u64 ino
, u64 data_offset
,
1274 struct clone_root
**found
)
1276 struct btrfs_fs_info
*fs_info
= sctx
->send_root
->fs_info
;
1282 u64 extent_item_pos
;
1284 struct btrfs_file_extent_item
*fi
;
1285 struct extent_buffer
*eb
= path
->nodes
[0];
1286 struct backref_ctx
*backref_ctx
= NULL
;
1287 struct clone_root
*cur_clone_root
;
1288 struct btrfs_key found_key
;
1289 struct btrfs_path
*tmp_path
;
1293 tmp_path
= alloc_path_for_send();
1297 /* We only use this path under the commit sem */
1298 tmp_path
->need_commit_sem
= 0;
1300 backref_ctx
= kmalloc(sizeof(*backref_ctx
), GFP_KERNEL
);
1306 if (data_offset
>= ino_size
) {
1308 * There may be extents that lie behind the file's size.
1309 * I at least had this in combination with snapshotting while
1310 * writing large files.
1316 fi
= btrfs_item_ptr(eb
, path
->slots
[0],
1317 struct btrfs_file_extent_item
);
1318 extent_type
= btrfs_file_extent_type(eb
, fi
);
1319 if (extent_type
== BTRFS_FILE_EXTENT_INLINE
) {
1323 compressed
= btrfs_file_extent_compression(eb
, fi
);
1325 num_bytes
= btrfs_file_extent_num_bytes(eb
, fi
);
1326 disk_byte
= btrfs_file_extent_disk_bytenr(eb
, fi
);
1327 if (disk_byte
== 0) {
1331 logical
= disk_byte
+ btrfs_file_extent_offset(eb
, fi
);
1333 down_read(&fs_info
->commit_root_sem
);
1334 ret
= extent_from_logical(fs_info
, disk_byte
, tmp_path
,
1335 &found_key
, &flags
);
1336 up_read(&fs_info
->commit_root_sem
);
1337 btrfs_release_path(tmp_path
);
1341 if (flags
& BTRFS_EXTENT_FLAG_TREE_BLOCK
) {
1347 * Setup the clone roots.
1349 for (i
= 0; i
< sctx
->clone_roots_cnt
; i
++) {
1350 cur_clone_root
= sctx
->clone_roots
+ i
;
1351 cur_clone_root
->ino
= (u64
)-1;
1352 cur_clone_root
->offset
= 0;
1353 cur_clone_root
->found_refs
= 0;
1356 backref_ctx
->sctx
= sctx
;
1357 backref_ctx
->found
= 0;
1358 backref_ctx
->cur_objectid
= ino
;
1359 backref_ctx
->cur_offset
= data_offset
;
1360 backref_ctx
->found_itself
= 0;
1361 backref_ctx
->extent_len
= num_bytes
;
1363 * For non-compressed extents iterate_extent_inodes() gives us extent
1364 * offsets that already take into account the data offset, but not for
1365 * compressed extents, since the offset is logical and not relative to
1366 * the physical extent locations. We must take this into account to
1367 * avoid sending clone offsets that go beyond the source file's size,
1368 * which would result in the clone ioctl failing with -EINVAL on the
1371 if (compressed
== BTRFS_COMPRESS_NONE
)
1372 backref_ctx
->data_offset
= 0;
1374 backref_ctx
->data_offset
= btrfs_file_extent_offset(eb
, fi
);
1377 * The last extent of a file may be too large due to page alignment.
1378 * We need to adjust extent_len in this case so that the checks in
1379 * __iterate_backrefs work.
1381 if (data_offset
+ num_bytes
>= ino_size
)
1382 backref_ctx
->extent_len
= ino_size
- data_offset
;
1385 * Now collect all backrefs.
1387 if (compressed
== BTRFS_COMPRESS_NONE
)
1388 extent_item_pos
= logical
- found_key
.objectid
;
1390 extent_item_pos
= 0;
1391 ret
= iterate_extent_inodes(fs_info
, found_key
.objectid
,
1392 extent_item_pos
, 1, __iterate_backrefs
,
1393 backref_ctx
, false);
1398 if (!backref_ctx
->found_itself
) {
1399 /* found a bug in backref code? */
1402 "did not find backref in send_root. inode=%llu, offset=%llu, disk_byte=%llu found extent=%llu",
1403 ino
, data_offset
, disk_byte
, found_key
.objectid
);
1407 btrfs_debug(fs_info
,
1408 "find_extent_clone: data_offset=%llu, ino=%llu, num_bytes=%llu, logical=%llu",
1409 data_offset
, ino
, num_bytes
, logical
);
1411 if (!backref_ctx
->found
)
1412 btrfs_debug(fs_info
, "no clones found");
1414 cur_clone_root
= NULL
;
1415 for (i
= 0; i
< sctx
->clone_roots_cnt
; i
++) {
1416 if (sctx
->clone_roots
[i
].found_refs
) {
1417 if (!cur_clone_root
)
1418 cur_clone_root
= sctx
->clone_roots
+ i
;
1419 else if (sctx
->clone_roots
[i
].root
== sctx
->send_root
)
1420 /* prefer clones from send_root over others */
1421 cur_clone_root
= sctx
->clone_roots
+ i
;
1426 if (cur_clone_root
) {
1427 *found
= cur_clone_root
;
1434 btrfs_free_path(tmp_path
);
1439 static int read_symlink(struct btrfs_root
*root
,
1441 struct fs_path
*dest
)
1444 struct btrfs_path
*path
;
1445 struct btrfs_key key
;
1446 struct btrfs_file_extent_item
*ei
;
1452 path
= alloc_path_for_send();
1457 key
.type
= BTRFS_EXTENT_DATA_KEY
;
1459 ret
= btrfs_search_slot(NULL
, root
, &key
, path
, 0, 0);
1464 * An empty symlink inode. Can happen in rare error paths when
1465 * creating a symlink (transaction committed before the inode
1466 * eviction handler removed the symlink inode items and a crash
1467 * happened in between or the subvol was snapshoted in between).
1468 * Print an informative message to dmesg/syslog so that the user
1469 * can delete the symlink.
1471 btrfs_err(root
->fs_info
,
1472 "Found empty symlink inode %llu at root %llu",
1473 ino
, root
->root_key
.objectid
);
1478 ei
= btrfs_item_ptr(path
->nodes
[0], path
->slots
[0],
1479 struct btrfs_file_extent_item
);
1480 type
= btrfs_file_extent_type(path
->nodes
[0], ei
);
1481 compression
= btrfs_file_extent_compression(path
->nodes
[0], ei
);
1482 BUG_ON(type
!= BTRFS_FILE_EXTENT_INLINE
);
1483 BUG_ON(compression
);
1485 off
= btrfs_file_extent_inline_start(ei
);
1486 len
= btrfs_file_extent_ram_bytes(path
->nodes
[0], ei
);
1488 ret
= fs_path_add_from_extent_buffer(dest
, path
->nodes
[0], off
, len
);
1491 btrfs_free_path(path
);
1496 * Helper function to generate a file name that is unique in the root of
1497 * send_root and parent_root. This is used to generate names for orphan inodes.
1499 static int gen_unique_name(struct send_ctx
*sctx
,
1501 struct fs_path
*dest
)
1504 struct btrfs_path
*path
;
1505 struct btrfs_dir_item
*di
;
1510 path
= alloc_path_for_send();
1515 len
= snprintf(tmp
, sizeof(tmp
), "o%llu-%llu-%llu",
1517 ASSERT(len
< sizeof(tmp
));
1519 di
= btrfs_lookup_dir_item(NULL
, sctx
->send_root
,
1520 path
, BTRFS_FIRST_FREE_OBJECTID
,
1521 tmp
, strlen(tmp
), 0);
1522 btrfs_release_path(path
);
1528 /* not unique, try again */
1533 if (!sctx
->parent_root
) {
1539 di
= btrfs_lookup_dir_item(NULL
, sctx
->parent_root
,
1540 path
, BTRFS_FIRST_FREE_OBJECTID
,
1541 tmp
, strlen(tmp
), 0);
1542 btrfs_release_path(path
);
1548 /* not unique, try again */
1556 ret
= fs_path_add(dest
, tmp
, strlen(tmp
));
1559 btrfs_free_path(path
);
1564 inode_state_no_change
,
1565 inode_state_will_create
,
1566 inode_state_did_create
,
1567 inode_state_will_delete
,
1568 inode_state_did_delete
,
1571 static int get_cur_inode_state(struct send_ctx
*sctx
, u64 ino
, u64 gen
)
1579 ret
= get_inode_info(sctx
->send_root
, ino
, NULL
, &left_gen
, NULL
, NULL
,
1581 if (ret
< 0 && ret
!= -ENOENT
)
1585 if (!sctx
->parent_root
) {
1586 right_ret
= -ENOENT
;
1588 ret
= get_inode_info(sctx
->parent_root
, ino
, NULL
, &right_gen
,
1589 NULL
, NULL
, NULL
, NULL
);
1590 if (ret
< 0 && ret
!= -ENOENT
)
1595 if (!left_ret
&& !right_ret
) {
1596 if (left_gen
== gen
&& right_gen
== gen
) {
1597 ret
= inode_state_no_change
;
1598 } else if (left_gen
== gen
) {
1599 if (ino
< sctx
->send_progress
)
1600 ret
= inode_state_did_create
;
1602 ret
= inode_state_will_create
;
1603 } else if (right_gen
== gen
) {
1604 if (ino
< sctx
->send_progress
)
1605 ret
= inode_state_did_delete
;
1607 ret
= inode_state_will_delete
;
1611 } else if (!left_ret
) {
1612 if (left_gen
== gen
) {
1613 if (ino
< sctx
->send_progress
)
1614 ret
= inode_state_did_create
;
1616 ret
= inode_state_will_create
;
1620 } else if (!right_ret
) {
1621 if (right_gen
== gen
) {
1622 if (ino
< sctx
->send_progress
)
1623 ret
= inode_state_did_delete
;
1625 ret
= inode_state_will_delete
;
1637 static int is_inode_existent(struct send_ctx
*sctx
, u64 ino
, u64 gen
)
1641 if (ino
== BTRFS_FIRST_FREE_OBJECTID
)
1644 ret
= get_cur_inode_state(sctx
, ino
, gen
);
1648 if (ret
== inode_state_no_change
||
1649 ret
== inode_state_did_create
||
1650 ret
== inode_state_will_delete
)
1660 * Helper function to lookup a dir item in a dir.
1662 static int lookup_dir_item_inode(struct btrfs_root
*root
,
1663 u64 dir
, const char *name
, int name_len
,
1668 struct btrfs_dir_item
*di
;
1669 struct btrfs_key key
;
1670 struct btrfs_path
*path
;
1672 path
= alloc_path_for_send();
1676 di
= btrfs_lookup_dir_item(NULL
, root
, path
,
1677 dir
, name
, name_len
, 0);
1678 if (IS_ERR_OR_NULL(di
)) {
1679 ret
= di
? PTR_ERR(di
) : -ENOENT
;
1682 btrfs_dir_item_key_to_cpu(path
->nodes
[0], di
, &key
);
1683 if (key
.type
== BTRFS_ROOT_ITEM_KEY
) {
1687 *found_inode
= key
.objectid
;
1688 *found_type
= btrfs_dir_type(path
->nodes
[0], di
);
1691 btrfs_free_path(path
);
1696 * Looks up the first btrfs_inode_ref of a given ino. It returns the parent dir,
1697 * generation of the parent dir and the name of the dir entry.
1699 static int get_first_ref(struct btrfs_root
*root
, u64 ino
,
1700 u64
*dir
, u64
*dir_gen
, struct fs_path
*name
)
1703 struct btrfs_key key
;
1704 struct btrfs_key found_key
;
1705 struct btrfs_path
*path
;
1709 path
= alloc_path_for_send();
1714 key
.type
= BTRFS_INODE_REF_KEY
;
1717 ret
= btrfs_search_slot_for_read(root
, &key
, path
, 1, 0);
1721 btrfs_item_key_to_cpu(path
->nodes
[0], &found_key
,
1723 if (ret
|| found_key
.objectid
!= ino
||
1724 (found_key
.type
!= BTRFS_INODE_REF_KEY
&&
1725 found_key
.type
!= BTRFS_INODE_EXTREF_KEY
)) {
1730 if (found_key
.type
== BTRFS_INODE_REF_KEY
) {
1731 struct btrfs_inode_ref
*iref
;
1732 iref
= btrfs_item_ptr(path
->nodes
[0], path
->slots
[0],
1733 struct btrfs_inode_ref
);
1734 len
= btrfs_inode_ref_name_len(path
->nodes
[0], iref
);
1735 ret
= fs_path_add_from_extent_buffer(name
, path
->nodes
[0],
1736 (unsigned long)(iref
+ 1),
1738 parent_dir
= found_key
.offset
;
1740 struct btrfs_inode_extref
*extref
;
1741 extref
= btrfs_item_ptr(path
->nodes
[0], path
->slots
[0],
1742 struct btrfs_inode_extref
);
1743 len
= btrfs_inode_extref_name_len(path
->nodes
[0], extref
);
1744 ret
= fs_path_add_from_extent_buffer(name
, path
->nodes
[0],
1745 (unsigned long)&extref
->name
, len
);
1746 parent_dir
= btrfs_inode_extref_parent(path
->nodes
[0], extref
);
1750 btrfs_release_path(path
);
1753 ret
= get_inode_info(root
, parent_dir
, NULL
, dir_gen
, NULL
,
1762 btrfs_free_path(path
);
1766 static int is_first_ref(struct btrfs_root
*root
,
1768 const char *name
, int name_len
)
1771 struct fs_path
*tmp_name
;
1774 tmp_name
= fs_path_alloc();
1778 ret
= get_first_ref(root
, ino
, &tmp_dir
, NULL
, tmp_name
);
1782 if (dir
!= tmp_dir
|| name_len
!= fs_path_len(tmp_name
)) {
1787 ret
= !memcmp(tmp_name
->start
, name
, name_len
);
1790 fs_path_free(tmp_name
);
1795 * Used by process_recorded_refs to determine if a new ref would overwrite an
1796 * already existing ref. In case it detects an overwrite, it returns the
1797 * inode/gen in who_ino/who_gen.
1798 * When an overwrite is detected, process_recorded_refs does proper orphanizing
1799 * to make sure later references to the overwritten inode are possible.
1800 * Orphanizing is however only required for the first ref of an inode.
1801 * process_recorded_refs does an additional is_first_ref check to see if
1802 * orphanizing is really required.
1804 static int will_overwrite_ref(struct send_ctx
*sctx
, u64 dir
, u64 dir_gen
,
1805 const char *name
, int name_len
,
1806 u64
*who_ino
, u64
*who_gen
, u64
*who_mode
)
1810 u64 other_inode
= 0;
1813 if (!sctx
->parent_root
)
1816 ret
= is_inode_existent(sctx
, dir
, dir_gen
);
1821 * If we have a parent root we need to verify that the parent dir was
1822 * not deleted and then re-created, if it was then we have no overwrite
1823 * and we can just unlink this entry.
1825 if (sctx
->parent_root
&& dir
!= BTRFS_FIRST_FREE_OBJECTID
) {
1826 ret
= get_inode_info(sctx
->parent_root
, dir
, NULL
, &gen
, NULL
,
1828 if (ret
< 0 && ret
!= -ENOENT
)
1838 ret
= lookup_dir_item_inode(sctx
->parent_root
, dir
, name
, name_len
,
1839 &other_inode
, &other_type
);
1840 if (ret
< 0 && ret
!= -ENOENT
)
1848 * Check if the overwritten ref was already processed. If yes, the ref
1849 * was already unlinked/moved, so we can safely assume that we will not
1850 * overwrite anything at this point in time.
1852 if (other_inode
> sctx
->send_progress
||
1853 is_waiting_for_move(sctx
, other_inode
)) {
1854 ret
= get_inode_info(sctx
->parent_root
, other_inode
, NULL
,
1855 who_gen
, who_mode
, NULL
, NULL
, NULL
);
1860 *who_ino
= other_inode
;
1870 * Checks if the ref was overwritten by an already processed inode. This is
1871 * used by __get_cur_name_and_parent to find out if the ref was orphanized and
1872 * thus the orphan name needs be used.
1873 * process_recorded_refs also uses it to avoid unlinking of refs that were
1876 static int did_overwrite_ref(struct send_ctx
*sctx
,
1877 u64 dir
, u64 dir_gen
,
1878 u64 ino
, u64 ino_gen
,
1879 const char *name
, int name_len
)
1886 if (!sctx
->parent_root
)
1889 ret
= is_inode_existent(sctx
, dir
, dir_gen
);
1893 if (dir
!= BTRFS_FIRST_FREE_OBJECTID
) {
1894 ret
= get_inode_info(sctx
->send_root
, dir
, NULL
, &gen
, NULL
,
1896 if (ret
< 0 && ret
!= -ENOENT
)
1906 /* check if the ref was overwritten by another ref */
1907 ret
= lookup_dir_item_inode(sctx
->send_root
, dir
, name
, name_len
,
1908 &ow_inode
, &other_type
);
1909 if (ret
< 0 && ret
!= -ENOENT
)
1912 /* was never and will never be overwritten */
1917 ret
= get_inode_info(sctx
->send_root
, ow_inode
, NULL
, &gen
, NULL
, NULL
,
1922 if (ow_inode
== ino
&& gen
== ino_gen
) {
1928 * We know that it is or will be overwritten. Check this now.
1929 * The current inode being processed might have been the one that caused
1930 * inode 'ino' to be orphanized, therefore check if ow_inode matches
1931 * the current inode being processed.
1933 if ((ow_inode
< sctx
->send_progress
) ||
1934 (ino
!= sctx
->cur_ino
&& ow_inode
== sctx
->cur_ino
&&
1935 gen
== sctx
->cur_inode_gen
))
1945 * Same as did_overwrite_ref, but also checks if it is the first ref of an inode
1946 * that got overwritten. This is used by process_recorded_refs to determine
1947 * if it has to use the path as returned by get_cur_path or the orphan name.
1949 static int did_overwrite_first_ref(struct send_ctx
*sctx
, u64 ino
, u64 gen
)
1952 struct fs_path
*name
= NULL
;
1956 if (!sctx
->parent_root
)
1959 name
= fs_path_alloc();
1963 ret
= get_first_ref(sctx
->parent_root
, ino
, &dir
, &dir_gen
, name
);
1967 ret
= did_overwrite_ref(sctx
, dir
, dir_gen
, ino
, gen
,
1968 name
->start
, fs_path_len(name
));
1976 * Insert a name cache entry. On 32bit kernels the radix tree index is 32bit,
1977 * so we need to do some special handling in case we have clashes. This function
1978 * takes care of this with the help of name_cache_entry::radix_list.
1979 * In case of error, nce is kfreed.
1981 static int name_cache_insert(struct send_ctx
*sctx
,
1982 struct name_cache_entry
*nce
)
1985 struct list_head
*nce_head
;
1987 nce_head
= radix_tree_lookup(&sctx
->name_cache
,
1988 (unsigned long)nce
->ino
);
1990 nce_head
= kmalloc(sizeof(*nce_head
), GFP_KERNEL
);
1995 INIT_LIST_HEAD(nce_head
);
1997 ret
= radix_tree_insert(&sctx
->name_cache
, nce
->ino
, nce_head
);
2004 list_add_tail(&nce
->radix_list
, nce_head
);
2005 list_add_tail(&nce
->list
, &sctx
->name_cache_list
);
2006 sctx
->name_cache_size
++;
2011 static void name_cache_delete(struct send_ctx
*sctx
,
2012 struct name_cache_entry
*nce
)
2014 struct list_head
*nce_head
;
2016 nce_head
= radix_tree_lookup(&sctx
->name_cache
,
2017 (unsigned long)nce
->ino
);
2019 btrfs_err(sctx
->send_root
->fs_info
,
2020 "name_cache_delete lookup failed ino %llu cache size %d, leaking memory",
2021 nce
->ino
, sctx
->name_cache_size
);
2024 list_del(&nce
->radix_list
);
2025 list_del(&nce
->list
);
2026 sctx
->name_cache_size
--;
2029 * We may not get to the final release of nce_head if the lookup fails
2031 if (nce_head
&& list_empty(nce_head
)) {
2032 radix_tree_delete(&sctx
->name_cache
, (unsigned long)nce
->ino
);
2037 static struct name_cache_entry
*name_cache_search(struct send_ctx
*sctx
,
2040 struct list_head
*nce_head
;
2041 struct name_cache_entry
*cur
;
2043 nce_head
= radix_tree_lookup(&sctx
->name_cache
, (unsigned long)ino
);
2047 list_for_each_entry(cur
, nce_head
, radix_list
) {
2048 if (cur
->ino
== ino
&& cur
->gen
== gen
)
2055 * Removes the entry from the list and adds it back to the end. This marks the
2056 * entry as recently used so that name_cache_clean_unused does not remove it.
2058 static void name_cache_used(struct send_ctx
*sctx
, struct name_cache_entry
*nce
)
2060 list_del(&nce
->list
);
2061 list_add_tail(&nce
->list
, &sctx
->name_cache_list
);
2065 * Remove some entries from the beginning of name_cache_list.
2067 static void name_cache_clean_unused(struct send_ctx
*sctx
)
2069 struct name_cache_entry
*nce
;
2071 if (sctx
->name_cache_size
< SEND_CTX_NAME_CACHE_CLEAN_SIZE
)
2074 while (sctx
->name_cache_size
> SEND_CTX_MAX_NAME_CACHE_SIZE
) {
2075 nce
= list_entry(sctx
->name_cache_list
.next
,
2076 struct name_cache_entry
, list
);
2077 name_cache_delete(sctx
, nce
);
2082 static void name_cache_free(struct send_ctx
*sctx
)
2084 struct name_cache_entry
*nce
;
2086 while (!list_empty(&sctx
->name_cache_list
)) {
2087 nce
= list_entry(sctx
->name_cache_list
.next
,
2088 struct name_cache_entry
, list
);
2089 name_cache_delete(sctx
, nce
);
2095 * Used by get_cur_path for each ref up to the root.
2096 * Returns 0 if it succeeded.
2097 * Returns 1 if the inode is not existent or got overwritten. In that case, the
2098 * name is an orphan name. This instructs get_cur_path to stop iterating. If 1
2099 * is returned, parent_ino/parent_gen are not guaranteed to be valid.
2100 * Returns <0 in case of error.
2102 static int __get_cur_name_and_parent(struct send_ctx
*sctx
,
2106 struct fs_path
*dest
)
2110 struct name_cache_entry
*nce
= NULL
;
2113 * First check if we already did a call to this function with the same
2114 * ino/gen. If yes, check if the cache entry is still up-to-date. If yes
2115 * return the cached result.
2117 nce
= name_cache_search(sctx
, ino
, gen
);
2119 if (ino
< sctx
->send_progress
&& nce
->need_later_update
) {
2120 name_cache_delete(sctx
, nce
);
2124 name_cache_used(sctx
, nce
);
2125 *parent_ino
= nce
->parent_ino
;
2126 *parent_gen
= nce
->parent_gen
;
2127 ret
= fs_path_add(dest
, nce
->name
, nce
->name_len
);
2136 * If the inode is not existent yet, add the orphan name and return 1.
2137 * This should only happen for the parent dir that we determine in
2140 ret
= is_inode_existent(sctx
, ino
, gen
);
2145 ret
= gen_unique_name(sctx
, ino
, gen
, dest
);
2153 * Depending on whether the inode was already processed or not, use
2154 * send_root or parent_root for ref lookup.
2156 if (ino
< sctx
->send_progress
)
2157 ret
= get_first_ref(sctx
->send_root
, ino
,
2158 parent_ino
, parent_gen
, dest
);
2160 ret
= get_first_ref(sctx
->parent_root
, ino
,
2161 parent_ino
, parent_gen
, dest
);
2166 * Check if the ref was overwritten by an inode's ref that was processed
2167 * earlier. If yes, treat as orphan and return 1.
2169 ret
= did_overwrite_ref(sctx
, *parent_ino
, *parent_gen
, ino
, gen
,
2170 dest
->start
, dest
->end
- dest
->start
);
2174 fs_path_reset(dest
);
2175 ret
= gen_unique_name(sctx
, ino
, gen
, dest
);
2183 * Store the result of the lookup in the name cache.
2185 nce
= kmalloc(sizeof(*nce
) + fs_path_len(dest
) + 1, GFP_KERNEL
);
2193 nce
->parent_ino
= *parent_ino
;
2194 nce
->parent_gen
= *parent_gen
;
2195 nce
->name_len
= fs_path_len(dest
);
2197 strcpy(nce
->name
, dest
->start
);
2199 if (ino
< sctx
->send_progress
)
2200 nce
->need_later_update
= 0;
2202 nce
->need_later_update
= 1;
2204 nce_ret
= name_cache_insert(sctx
, nce
);
2207 name_cache_clean_unused(sctx
);
2214 * Magic happens here. This function returns the first ref to an inode as it
2215 * would look like while receiving the stream at this point in time.
2216 * We walk the path up to the root. For every inode in between, we check if it
2217 * was already processed/sent. If yes, we continue with the parent as found
2218 * in send_root. If not, we continue with the parent as found in parent_root.
2219 * If we encounter an inode that was deleted at this point in time, we use the
2220 * inodes "orphan" name instead of the real name and stop. Same with new inodes
2221 * that were not created yet and overwritten inodes/refs.
2223 * When do we have orphan inodes:
2224 * 1. When an inode is freshly created and thus no valid refs are available yet
2225 * 2. When a directory lost all it's refs (deleted) but still has dir items
2226 * inside which were not processed yet (pending for move/delete). If anyone
2227 * tried to get the path to the dir items, it would get a path inside that
2229 * 3. When an inode is moved around or gets new links, it may overwrite the ref
2230 * of an unprocessed inode. If in that case the first ref would be
2231 * overwritten, the overwritten inode gets "orphanized". Later when we
2232 * process this overwritten inode, it is restored at a new place by moving
2235 * sctx->send_progress tells this function at which point in time receiving
2238 static int get_cur_path(struct send_ctx
*sctx
, u64 ino
, u64 gen
,
2239 struct fs_path
*dest
)
2242 struct fs_path
*name
= NULL
;
2243 u64 parent_inode
= 0;
2247 name
= fs_path_alloc();
2254 fs_path_reset(dest
);
2256 while (!stop
&& ino
!= BTRFS_FIRST_FREE_OBJECTID
) {
2257 struct waiting_dir_move
*wdm
;
2259 fs_path_reset(name
);
2261 if (is_waiting_for_rm(sctx
, ino
)) {
2262 ret
= gen_unique_name(sctx
, ino
, gen
, name
);
2265 ret
= fs_path_add_path(dest
, name
);
2269 wdm
= get_waiting_dir_move(sctx
, ino
);
2270 if (wdm
&& wdm
->orphanized
) {
2271 ret
= gen_unique_name(sctx
, ino
, gen
, name
);
2274 ret
= get_first_ref(sctx
->parent_root
, ino
,
2275 &parent_inode
, &parent_gen
, name
);
2277 ret
= __get_cur_name_and_parent(sctx
, ino
, gen
,
2287 ret
= fs_path_add_path(dest
, name
);
2298 fs_path_unreverse(dest
);
2303 * Sends a BTRFS_SEND_C_SUBVOL command/item to userspace
2305 static int send_subvol_begin(struct send_ctx
*sctx
)
2308 struct btrfs_root
*send_root
= sctx
->send_root
;
2309 struct btrfs_root
*parent_root
= sctx
->parent_root
;
2310 struct btrfs_path
*path
;
2311 struct btrfs_key key
;
2312 struct btrfs_root_ref
*ref
;
2313 struct extent_buffer
*leaf
;
2317 path
= btrfs_alloc_path();
2321 name
= kmalloc(BTRFS_PATH_NAME_MAX
, GFP_KERNEL
);
2323 btrfs_free_path(path
);
2327 key
.objectid
= send_root
->root_key
.objectid
;
2328 key
.type
= BTRFS_ROOT_BACKREF_KEY
;
2331 ret
= btrfs_search_slot_for_read(send_root
->fs_info
->tree_root
,
2340 leaf
= path
->nodes
[0];
2341 btrfs_item_key_to_cpu(leaf
, &key
, path
->slots
[0]);
2342 if (key
.type
!= BTRFS_ROOT_BACKREF_KEY
||
2343 key
.objectid
!= send_root
->root_key
.objectid
) {
2347 ref
= btrfs_item_ptr(leaf
, path
->slots
[0], struct btrfs_root_ref
);
2348 namelen
= btrfs_root_ref_name_len(leaf
, ref
);
2349 read_extent_buffer(leaf
, name
, (unsigned long)(ref
+ 1), namelen
);
2350 btrfs_release_path(path
);
2353 ret
= begin_cmd(sctx
, BTRFS_SEND_C_SNAPSHOT
);
2357 ret
= begin_cmd(sctx
, BTRFS_SEND_C_SUBVOL
);
2362 TLV_PUT_STRING(sctx
, BTRFS_SEND_A_PATH
, name
, namelen
);
2364 if (!btrfs_is_empty_uuid(sctx
->send_root
->root_item
.received_uuid
))
2365 TLV_PUT_UUID(sctx
, BTRFS_SEND_A_UUID
,
2366 sctx
->send_root
->root_item
.received_uuid
);
2368 TLV_PUT_UUID(sctx
, BTRFS_SEND_A_UUID
,
2369 sctx
->send_root
->root_item
.uuid
);
2371 TLV_PUT_U64(sctx
, BTRFS_SEND_A_CTRANSID
,
2372 le64_to_cpu(sctx
->send_root
->root_item
.ctransid
));
2374 if (!btrfs_is_empty_uuid(parent_root
->root_item
.received_uuid
))
2375 TLV_PUT_UUID(sctx
, BTRFS_SEND_A_CLONE_UUID
,
2376 parent_root
->root_item
.received_uuid
);
2378 TLV_PUT_UUID(sctx
, BTRFS_SEND_A_CLONE_UUID
,
2379 parent_root
->root_item
.uuid
);
2380 TLV_PUT_U64(sctx
, BTRFS_SEND_A_CLONE_CTRANSID
,
2381 le64_to_cpu(sctx
->parent_root
->root_item
.ctransid
));
2384 ret
= send_cmd(sctx
);
2388 btrfs_free_path(path
);
2393 static int send_truncate(struct send_ctx
*sctx
, u64 ino
, u64 gen
, u64 size
)
2395 struct btrfs_fs_info
*fs_info
= sctx
->send_root
->fs_info
;
2399 btrfs_debug(fs_info
, "send_truncate %llu size=%llu", ino
, size
);
2401 p
= fs_path_alloc();
2405 ret
= begin_cmd(sctx
, BTRFS_SEND_C_TRUNCATE
);
2409 ret
= get_cur_path(sctx
, ino
, gen
, p
);
2412 TLV_PUT_PATH(sctx
, BTRFS_SEND_A_PATH
, p
);
2413 TLV_PUT_U64(sctx
, BTRFS_SEND_A_SIZE
, size
);
2415 ret
= send_cmd(sctx
);
2423 static int send_chmod(struct send_ctx
*sctx
, u64 ino
, u64 gen
, u64 mode
)
2425 struct btrfs_fs_info
*fs_info
= sctx
->send_root
->fs_info
;
2429 btrfs_debug(fs_info
, "send_chmod %llu mode=%llu", ino
, mode
);
2431 p
= fs_path_alloc();
2435 ret
= begin_cmd(sctx
, BTRFS_SEND_C_CHMOD
);
2439 ret
= get_cur_path(sctx
, ino
, gen
, p
);
2442 TLV_PUT_PATH(sctx
, BTRFS_SEND_A_PATH
, p
);
2443 TLV_PUT_U64(sctx
, BTRFS_SEND_A_MODE
, mode
& 07777);
2445 ret
= send_cmd(sctx
);
2453 static int send_chown(struct send_ctx
*sctx
, u64 ino
, u64 gen
, u64 uid
, u64 gid
)
2455 struct btrfs_fs_info
*fs_info
= sctx
->send_root
->fs_info
;
2459 btrfs_debug(fs_info
, "send_chown %llu uid=%llu, gid=%llu",
2462 p
= fs_path_alloc();
2466 ret
= begin_cmd(sctx
, BTRFS_SEND_C_CHOWN
);
2470 ret
= get_cur_path(sctx
, ino
, gen
, p
);
2473 TLV_PUT_PATH(sctx
, BTRFS_SEND_A_PATH
, p
);
2474 TLV_PUT_U64(sctx
, BTRFS_SEND_A_UID
, uid
);
2475 TLV_PUT_U64(sctx
, BTRFS_SEND_A_GID
, gid
);
2477 ret
= send_cmd(sctx
);
2485 static int send_utimes(struct send_ctx
*sctx
, u64 ino
, u64 gen
)
2487 struct btrfs_fs_info
*fs_info
= sctx
->send_root
->fs_info
;
2489 struct fs_path
*p
= NULL
;
2490 struct btrfs_inode_item
*ii
;
2491 struct btrfs_path
*path
= NULL
;
2492 struct extent_buffer
*eb
;
2493 struct btrfs_key key
;
2496 btrfs_debug(fs_info
, "send_utimes %llu", ino
);
2498 p
= fs_path_alloc();
2502 path
= alloc_path_for_send();
2509 key
.type
= BTRFS_INODE_ITEM_KEY
;
2511 ret
= btrfs_search_slot(NULL
, sctx
->send_root
, &key
, path
, 0, 0);
2517 eb
= path
->nodes
[0];
2518 slot
= path
->slots
[0];
2519 ii
= btrfs_item_ptr(eb
, slot
, struct btrfs_inode_item
);
2521 ret
= begin_cmd(sctx
, BTRFS_SEND_C_UTIMES
);
2525 ret
= get_cur_path(sctx
, ino
, gen
, p
);
2528 TLV_PUT_PATH(sctx
, BTRFS_SEND_A_PATH
, p
);
2529 TLV_PUT_BTRFS_TIMESPEC(sctx
, BTRFS_SEND_A_ATIME
, eb
, &ii
->atime
);
2530 TLV_PUT_BTRFS_TIMESPEC(sctx
, BTRFS_SEND_A_MTIME
, eb
, &ii
->mtime
);
2531 TLV_PUT_BTRFS_TIMESPEC(sctx
, BTRFS_SEND_A_CTIME
, eb
, &ii
->ctime
);
2532 /* TODO Add otime support when the otime patches get into upstream */
2534 ret
= send_cmd(sctx
);
2539 btrfs_free_path(path
);
2544 * Sends a BTRFS_SEND_C_MKXXX or SYMLINK command to user space. We don't have
2545 * a valid path yet because we did not process the refs yet. So, the inode
2546 * is created as orphan.
2548 static int send_create_inode(struct send_ctx
*sctx
, u64 ino
)
2550 struct btrfs_fs_info
*fs_info
= sctx
->send_root
->fs_info
;
2558 btrfs_debug(fs_info
, "send_create_inode %llu", ino
);
2560 p
= fs_path_alloc();
2564 if (ino
!= sctx
->cur_ino
) {
2565 ret
= get_inode_info(sctx
->send_root
, ino
, NULL
, &gen
, &mode
,
2570 gen
= sctx
->cur_inode_gen
;
2571 mode
= sctx
->cur_inode_mode
;
2572 rdev
= sctx
->cur_inode_rdev
;
2575 if (S_ISREG(mode
)) {
2576 cmd
= BTRFS_SEND_C_MKFILE
;
2577 } else if (S_ISDIR(mode
)) {
2578 cmd
= BTRFS_SEND_C_MKDIR
;
2579 } else if (S_ISLNK(mode
)) {
2580 cmd
= BTRFS_SEND_C_SYMLINK
;
2581 } else if (S_ISCHR(mode
) || S_ISBLK(mode
)) {
2582 cmd
= BTRFS_SEND_C_MKNOD
;
2583 } else if (S_ISFIFO(mode
)) {
2584 cmd
= BTRFS_SEND_C_MKFIFO
;
2585 } else if (S_ISSOCK(mode
)) {
2586 cmd
= BTRFS_SEND_C_MKSOCK
;
2588 btrfs_warn(sctx
->send_root
->fs_info
, "unexpected inode type %o",
2589 (int)(mode
& S_IFMT
));
2594 ret
= begin_cmd(sctx
, cmd
);
2598 ret
= gen_unique_name(sctx
, ino
, gen
, p
);
2602 TLV_PUT_PATH(sctx
, BTRFS_SEND_A_PATH
, p
);
2603 TLV_PUT_U64(sctx
, BTRFS_SEND_A_INO
, ino
);
2605 if (S_ISLNK(mode
)) {
2607 ret
= read_symlink(sctx
->send_root
, ino
, p
);
2610 TLV_PUT_PATH(sctx
, BTRFS_SEND_A_PATH_LINK
, p
);
2611 } else if (S_ISCHR(mode
) || S_ISBLK(mode
) ||
2612 S_ISFIFO(mode
) || S_ISSOCK(mode
)) {
2613 TLV_PUT_U64(sctx
, BTRFS_SEND_A_RDEV
, new_encode_dev(rdev
));
2614 TLV_PUT_U64(sctx
, BTRFS_SEND_A_MODE
, mode
);
2617 ret
= send_cmd(sctx
);
2629 * We need some special handling for inodes that get processed before the parent
2630 * directory got created. See process_recorded_refs for details.
2631 * This function does the check if we already created the dir out of order.
2633 static int did_create_dir(struct send_ctx
*sctx
, u64 dir
)
2636 struct btrfs_path
*path
= NULL
;
2637 struct btrfs_key key
;
2638 struct btrfs_key found_key
;
2639 struct btrfs_key di_key
;
2640 struct extent_buffer
*eb
;
2641 struct btrfs_dir_item
*di
;
2644 path
= alloc_path_for_send();
2651 key
.type
= BTRFS_DIR_INDEX_KEY
;
2653 ret
= btrfs_search_slot(NULL
, sctx
->send_root
, &key
, path
, 0, 0);
2658 eb
= path
->nodes
[0];
2659 slot
= path
->slots
[0];
2660 if (slot
>= btrfs_header_nritems(eb
)) {
2661 ret
= btrfs_next_leaf(sctx
->send_root
, path
);
2664 } else if (ret
> 0) {
2671 btrfs_item_key_to_cpu(eb
, &found_key
, slot
);
2672 if (found_key
.objectid
!= key
.objectid
||
2673 found_key
.type
!= key
.type
) {
2678 di
= btrfs_item_ptr(eb
, slot
, struct btrfs_dir_item
);
2679 btrfs_dir_item_key_to_cpu(eb
, di
, &di_key
);
2681 if (di_key
.type
!= BTRFS_ROOT_ITEM_KEY
&&
2682 di_key
.objectid
< sctx
->send_progress
) {
2691 btrfs_free_path(path
);
2696 * Only creates the inode if it is:
2697 * 1. Not a directory
2698 * 2. Or a directory which was not created already due to out of order
2699 * directories. See did_create_dir and process_recorded_refs for details.
2701 static int send_create_inode_if_needed(struct send_ctx
*sctx
)
2705 if (S_ISDIR(sctx
->cur_inode_mode
)) {
2706 ret
= did_create_dir(sctx
, sctx
->cur_ino
);
2715 ret
= send_create_inode(sctx
, sctx
->cur_ino
);
2723 struct recorded_ref
{
2724 struct list_head list
;
2726 struct fs_path
*full_path
;
2732 static void set_ref_path(struct recorded_ref
*ref
, struct fs_path
*path
)
2734 ref
->full_path
= path
;
2735 ref
->name
= (char *)kbasename(ref
->full_path
->start
);
2736 ref
->name_len
= ref
->full_path
->end
- ref
->name
;
2740 * We need to process new refs before deleted refs, but compare_tree gives us
2741 * everything mixed. So we first record all refs and later process them.
2742 * This function is a helper to record one ref.
2744 static int __record_ref(struct list_head
*head
, u64 dir
,
2745 u64 dir_gen
, struct fs_path
*path
)
2747 struct recorded_ref
*ref
;
2749 ref
= kmalloc(sizeof(*ref
), GFP_KERNEL
);
2754 ref
->dir_gen
= dir_gen
;
2755 set_ref_path(ref
, path
);
2756 list_add_tail(&ref
->list
, head
);
2760 static int dup_ref(struct recorded_ref
*ref
, struct list_head
*list
)
2762 struct recorded_ref
*new;
2764 new = kmalloc(sizeof(*ref
), GFP_KERNEL
);
2768 new->dir
= ref
->dir
;
2769 new->dir_gen
= ref
->dir_gen
;
2770 new->full_path
= NULL
;
2771 INIT_LIST_HEAD(&new->list
);
2772 list_add_tail(&new->list
, list
);
2776 static void __free_recorded_refs(struct list_head
*head
)
2778 struct recorded_ref
*cur
;
2780 while (!list_empty(head
)) {
2781 cur
= list_entry(head
->next
, struct recorded_ref
, list
);
2782 fs_path_free(cur
->full_path
);
2783 list_del(&cur
->list
);
2788 static void free_recorded_refs(struct send_ctx
*sctx
)
2790 __free_recorded_refs(&sctx
->new_refs
);
2791 __free_recorded_refs(&sctx
->deleted_refs
);
2795 * Renames/moves a file/dir to its orphan name. Used when the first
2796 * ref of an unprocessed inode gets overwritten and for all non empty
2799 static int orphanize_inode(struct send_ctx
*sctx
, u64 ino
, u64 gen
,
2800 struct fs_path
*path
)
2803 struct fs_path
*orphan
;
2805 orphan
= fs_path_alloc();
2809 ret
= gen_unique_name(sctx
, ino
, gen
, orphan
);
2813 ret
= send_rename(sctx
, path
, orphan
);
2816 fs_path_free(orphan
);
2820 static struct orphan_dir_info
*
2821 add_orphan_dir_info(struct send_ctx
*sctx
, u64 dir_ino
)
2823 struct rb_node
**p
= &sctx
->orphan_dirs
.rb_node
;
2824 struct rb_node
*parent
= NULL
;
2825 struct orphan_dir_info
*entry
, *odi
;
2829 entry
= rb_entry(parent
, struct orphan_dir_info
, node
);
2830 if (dir_ino
< entry
->ino
) {
2832 } else if (dir_ino
> entry
->ino
) {
2833 p
= &(*p
)->rb_right
;
2839 odi
= kmalloc(sizeof(*odi
), GFP_KERNEL
);
2841 return ERR_PTR(-ENOMEM
);
2844 odi
->last_dir_index_offset
= 0;
2846 rb_link_node(&odi
->node
, parent
, p
);
2847 rb_insert_color(&odi
->node
, &sctx
->orphan_dirs
);
static struct orphan_dir_info *
get_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)
{
	struct rb_node *n = sctx->orphan_dirs.rb_node;
	struct orphan_dir_info *entry;

	while (n) {
		entry = rb_entry(n, struct orphan_dir_info, node);
		if (dir_ino < entry->ino)
			n = n->rb_left;
		else if (dir_ino > entry->ino)
			n = n->rb_right;
		else
			return entry;
	}
	return NULL;
}
static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino)
{
	struct orphan_dir_info *odi = get_orphan_dir_info(sctx, dir_ino);

	return odi != NULL;
}
static void free_orphan_dir_info(struct send_ctx *sctx,
				 struct orphan_dir_info *odi)
{
	if (!odi)
		return;
	rb_erase(&odi->node, &sctx->orphan_dirs);
	kfree(odi);
}
/*
 * Returns 1 if a directory can be removed at this point in time.
 * We check this by iterating all dir items and checking if the inode behind
 * the dir item was already processed.
 */
static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen,
		     u64 send_progress)
{
	int ret = 0;
	struct btrfs_root *root = sctx->parent_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_key loc;
	struct btrfs_dir_item *di;
	struct orphan_dir_info *odi = NULL;

	/*
	 * Don't try to rmdir the top/root subvolume dir.
	 */
	if (dir == BTRFS_FIRST_FREE_OBJECTID)
		return 0;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = dir;
	key.type = BTRFS_DIR_INDEX_KEY;
	key.offset = 0;

	odi = get_orphan_dir_info(sctx, dir);
	if (odi)
		key.offset = odi->last_dir_index_offset;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		struct waiting_dir_move *dm;

		if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			else if (ret > 0)
				break;
			continue;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != key.objectid ||
		    found_key.type != key.type)
			break;

		di = btrfs_item_ptr(path->nodes[0], path->slots[0],
				    struct btrfs_dir_item);
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &loc);

		dm = get_waiting_dir_move(sctx, loc.objectid);
		if (dm) {
			odi = add_orphan_dir_info(sctx, dir);
			if (IS_ERR(odi)) {
				ret = PTR_ERR(odi);
				goto out;
			}
			odi->gen = dir_gen;
			odi->last_dir_index_offset = found_key.offset;
			dm->rmdir_ino = dir;
			ret = 0;
			goto out;
		}

		if (loc.objectid > send_progress) {
			odi = add_orphan_dir_info(sctx, dir);
			if (IS_ERR(odi)) {
				ret = PTR_ERR(odi);
				goto out;
			}
			odi->gen = dir_gen;
			odi->last_dir_index_offset = found_key.offset;
			ret = 0;
			goto out;
		}

		path->slots[0]++;
	}
	free_orphan_dir_info(sctx, odi);

	ret = 1;

out:
	btrfs_free_path(path);
	return ret;
}
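/*
 * Illustrative sketch (hypothetical helper, not part of the original file)
 * of the pattern process_recorded_refs() builds around can_rmdir(): a
 * deleted directory that still has unprocessed entries is kept alive under
 * its orphan name and removed later, once its remaining entries have been
 * processed.
 */
static __maybe_unused int try_rmdir_or_orphanize(struct send_ctx *sctx,
						 u64 dir, u64 gen,
						 struct fs_path *path)
{
	int ret = can_rmdir(sctx, dir, gen, sctx->cur_ino);

	if (ret < 0)
		return ret;
	if (ret)
		/* every entry was already processed, safe to delete */
		return send_rmdir(sctx, path);
	/* not yet removable: rename to the orphan name and retry later */
	return orphanize_inode(sctx, dir, gen, path);
}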
static int is_waiting_for_move(struct send_ctx *sctx, u64 ino)
{
	struct waiting_dir_move *entry = get_waiting_dir_move(sctx, ino);

	return entry != NULL;
}
static int add_waiting_dir_move(struct send_ctx *sctx, u64 ino, bool orphanized)
{
	struct rb_node **p = &sctx->waiting_dir_moves.rb_node;
	struct rb_node *parent = NULL;
	struct waiting_dir_move *entry, *dm;

	dm = kmalloc(sizeof(*dm), GFP_KERNEL);
	if (!dm)
		return -ENOMEM;
	dm->ino = ino;
	dm->rmdir_ino = 0;
	dm->orphanized = orphanized;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct waiting_dir_move, node);
		if (ino < entry->ino) {
			p = &(*p)->rb_left;
		} else if (ino > entry->ino) {
			p = &(*p)->rb_right;
		} else {
			kfree(dm);
			return -EEXIST;
		}
	}

	rb_link_node(&dm->node, parent, p);
	rb_insert_color(&dm->node, &sctx->waiting_dir_moves);
	return 0;
}
static struct waiting_dir_move *
get_waiting_dir_move(struct send_ctx *sctx, u64 ino)
{
	struct rb_node *n = sctx->waiting_dir_moves.rb_node;
	struct waiting_dir_move *entry;

	while (n) {
		entry = rb_entry(n, struct waiting_dir_move, node);
		if (ino < entry->ino)
			n = n->rb_left;
		else if (ino > entry->ino)
			n = n->rb_right;
		else
			return entry;
	}
	return NULL;
}
static void free_waiting_dir_move(struct send_ctx *sctx,
				  struct waiting_dir_move *dm)
{
	if (!dm)
		return;
	rb_erase(&dm->node, &sctx->waiting_dir_moves);
	kfree(dm);
}
static int add_pending_dir_move(struct send_ctx *sctx,
				u64 ino,
				u64 ino_gen,
				u64 parent_ino,
				struct list_head *new_refs,
				struct list_head *deleted_refs,
				const bool is_orphan)
{
	struct rb_node **p = &sctx->pending_dir_moves.rb_node;
	struct rb_node *parent = NULL;
	struct pending_dir_move *entry = NULL, *pm;
	struct recorded_ref *cur;
	int exists = 0;
	int ret;

	pm = kmalloc(sizeof(*pm), GFP_KERNEL);
	if (!pm)
		return -ENOMEM;
	pm->parent_ino = parent_ino;
	pm->ino = ino;
	pm->gen = ino_gen;
	INIT_LIST_HEAD(&pm->list);
	INIT_LIST_HEAD(&pm->update_refs);
	RB_CLEAR_NODE(&pm->node);

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct pending_dir_move, node);
		if (parent_ino < entry->parent_ino) {
			p = &(*p)->rb_left;
		} else if (parent_ino > entry->parent_ino) {
			p = &(*p)->rb_right;
		} else {
			exists = 1;
			break;
		}
	}

	list_for_each_entry(cur, deleted_refs, list) {
		ret = dup_ref(cur, &pm->update_refs);
		if (ret < 0)
			goto out;
	}
	list_for_each_entry(cur, new_refs, list) {
		ret = dup_ref(cur, &pm->update_refs);
		if (ret < 0)
			goto out;
	}

	ret = add_waiting_dir_move(sctx, pm->ino, is_orphan);
	if (ret)
		goto out;

	if (exists) {
		list_add_tail(&pm->list, &entry->list);
	} else {
		rb_link_node(&pm->node, parent, p);
		rb_insert_color(&pm->node, &sctx->pending_dir_moves);
	}
	ret = 0;

out:
	if (ret) {
		__free_recorded_refs(&pm->update_refs);
		kfree(pm);
	}
	return ret;
}
static struct pending_dir_move *get_pending_dir_moves(struct send_ctx *sctx,
						      u64 parent_ino)
{
	struct rb_node *n = sctx->pending_dir_moves.rb_node;
	struct pending_dir_move *entry;

	while (n) {
		entry = rb_entry(n, struct pending_dir_move, node);
		if (parent_ino < entry->parent_ino)
			n = n->rb_left;
		else if (parent_ino > entry->parent_ino)
			n = n->rb_right;
		else
			return entry;
	}
	return NULL;
}
static int path_loop(struct send_ctx *sctx, struct fs_path *name,
		     u64 ino, u64 gen, u64 *ancestor_ino)
{
	int ret = 0;
	u64 parent_inode = 0;
	u64 parent_gen = 0;
	u64 start_ino = ino;

	*ancestor_ino = 0;
	while (ino != BTRFS_FIRST_FREE_OBJECTID) {
		fs_path_reset(name);

		if (is_waiting_for_rm(sctx, ino))
			break;
		if (is_waiting_for_move(sctx, ino)) {
			if (*ancestor_ino == 0)
				*ancestor_ino = ino;
			ret = get_first_ref(sctx->parent_root, ino,
					    &parent_inode, &parent_gen, name);
		} else {
			ret = __get_cur_name_and_parent(sctx, ino, gen,
							&parent_inode,
							&parent_gen, name);
			if (ret > 0) {
				ret = 0;
				break;
			}
		}
		if (ret < 0)
			break;
		if (parent_inode == start_ino) {
			ret = 1;
			if (*ancestor_ino == 0)
				*ancestor_ino = ino;
			break;
		}
		ino = parent_inode;
		gen = parent_gen;
	}
	return ret;
}
static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
{
	struct fs_path *from_path = NULL;
	struct fs_path *to_path = NULL;
	struct fs_path *name = NULL;
	u64 orig_progress = sctx->send_progress;
	struct recorded_ref *cur;
	u64 parent_ino, parent_gen;
	struct waiting_dir_move *dm = NULL;
	u64 rmdir_ino = 0;
	u64 ancestor;
	bool is_orphan;
	int ret;

	name = fs_path_alloc();
	from_path = fs_path_alloc();
	if (!name || !from_path) {
		ret = -ENOMEM;
		goto out;
	}

	dm = get_waiting_dir_move(sctx, pm->ino);
	ASSERT(dm);
	rmdir_ino = dm->rmdir_ino;
	is_orphan = dm->orphanized;
	free_waiting_dir_move(sctx, dm);

	if (is_orphan) {
		ret = gen_unique_name(sctx, pm->ino,
				      pm->gen, from_path);
	} else {
		ret = get_first_ref(sctx->parent_root, pm->ino,
				    &parent_ino, &parent_gen, name);
		if (ret < 0)
			goto out;
		ret = get_cur_path(sctx, parent_ino, parent_gen,
				   from_path);
		if (ret < 0)
			goto out;
		ret = fs_path_add_path(from_path, name);
	}
	if (ret < 0)
		goto out;

	sctx->send_progress = sctx->cur_ino + 1;
	ret = path_loop(sctx, name, pm->ino, pm->gen, &ancestor);
	if (ret < 0)
		goto out;
	if (ret) {
		LIST_HEAD(deleted_refs);
		ASSERT(ancestor > BTRFS_FIRST_FREE_OBJECTID);
		ret = add_pending_dir_move(sctx, pm->ino, pm->gen, ancestor,
					   &pm->update_refs, &deleted_refs,
					   is_orphan);
		if (ret < 0)
			goto out;
		if (rmdir_ino) {
			dm = get_waiting_dir_move(sctx, pm->ino);
			ASSERT(dm);
			dm->rmdir_ino = rmdir_ino;
		}
		goto out;
	}
	fs_path_reset(name);
	to_path = name;
	name = NULL;
	ret = get_cur_path(sctx, pm->ino, pm->gen, to_path);
	if (ret < 0)
		goto out;

	ret = send_rename(sctx, from_path, to_path);
	if (ret < 0)
		goto out;

	if (rmdir_ino) {
		struct orphan_dir_info *odi;
		u64 gen;

		odi = get_orphan_dir_info(sctx, rmdir_ino);
		if (!odi) {
			/* already deleted */
			goto finish;
		}
		gen = odi->gen;

		ret = can_rmdir(sctx, rmdir_ino, gen, sctx->cur_ino);
		if (ret < 0)
			goto out;
		if (!ret)
			goto finish;

		name = fs_path_alloc();
		if (!name) {
			ret = -ENOMEM;
			goto out;
		}
		ret = get_cur_path(sctx, rmdir_ino, gen, name);
		if (ret < 0)
			goto out;
		ret = send_rmdir(sctx, name);
		if (ret < 0)
			goto out;
	}

finish:
	ret = send_utimes(sctx, pm->ino, pm->gen);
	if (ret < 0)
		goto out;

	/*
	 * After rename/move, need to update the utimes of both new parent(s)
	 * and old parent(s).
	 */
	list_for_each_entry(cur, &pm->update_refs, list) {
		/*
		 * The parent inode might have been deleted in the send snapshot
		 */
		ret = get_inode_info(sctx->send_root, cur->dir, NULL,
				     NULL, NULL, NULL, NULL, NULL);
		if (ret == -ENOENT) {
			ret = 0;
			continue;
		}
		if (ret < 0)
			goto out;

		ret = send_utimes(sctx, cur->dir, cur->dir_gen);
		if (ret < 0)
			goto out;
	}

out:
	fs_path_free(name);
	fs_path_free(from_path);
	fs_path_free(to_path);
	sctx->send_progress = orig_progress;

	return ret;
}
static void free_pending_move(struct send_ctx *sctx, struct pending_dir_move *m)
{
	if (!list_empty(&m->list))
		list_del(&m->list);
	if (!RB_EMPTY_NODE(&m->node))
		rb_erase(&m->node, &sctx->pending_dir_moves);
	__free_recorded_refs(&m->update_refs);
	kfree(m);
}
static void tail_append_pending_moves(struct send_ctx *sctx,
				      struct pending_dir_move *moves,
				      struct list_head *stack)
{
	if (list_empty(&moves->list)) {
		list_add_tail(&moves->list, stack);
	} else {
		LIST_HEAD(list);
		list_splice_init(&moves->list, &list);
		list_add_tail(&moves->list, stack);
		list_splice_tail(&list, stack);
	}
	if (!RB_EMPTY_NODE(&moves->node)) {
		rb_erase(&moves->node, &sctx->pending_dir_moves);
		RB_CLEAR_NODE(&moves->node);
	}
}
static int apply_children_dir_moves(struct send_ctx *sctx)
{
	struct pending_dir_move *pm;
	struct list_head stack;
	u64 parent_ino = sctx->cur_ino;
	int ret = 0;

	pm = get_pending_dir_moves(sctx, parent_ino);
	if (!pm)
		return 0;

	INIT_LIST_HEAD(&stack);
	tail_append_pending_moves(sctx, pm, &stack);

	while (!list_empty(&stack)) {
		pm = list_first_entry(&stack, struct pending_dir_move, list);
		parent_ino = pm->ino;
		ret = apply_dir_move(sctx, pm);
		free_pending_move(sctx, pm);
		if (ret)
			goto out;
		pm = get_pending_dir_moves(sctx, parent_ino);
		if (pm)
			tail_append_pending_moves(sctx, pm, &stack);
	}
	return 0;

out:
	while (!list_empty(&stack)) {
		pm = list_first_entry(&stack, struct pending_dir_move, list);
		free_pending_move(sctx, pm);
	}
	return ret;
}
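/*
 * Minimal usage sketch (illustrative only; this helper is hypothetical):
 * pending moves are keyed by the inode number of the directory that blocks
 * them, so once the current inode has been processed, its queued children
 * moves become applicable. The real call site lives elsewhere in this
 * file, after an inode is finished.
 */
static __maybe_unused int flush_pending_moves_for_cur_inode(struct send_ctx *sctx)
{
	return apply_children_dir_moves(sctx);
}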
/*
 * We might need to delay a directory rename even when no ancestor directory
 * (in the send root) with a higher inode number than ours (sctx->cur_ino) was
 * renamed. This happens when we rename a directory to the old name (the name
 * in the parent root) of some other unrelated directory that got its rename
 * delayed due to some ancestor with higher number that got renamed.
 *
 * Example:
 *
 * Parent snapshot:
 * .                                       (ino 256)
 * |---- a/                                (ino 257)
 * |     |---- file                        (ino 260)
 * |
 * |---- b/                                (ino 258)
 * |---- c/                                (ino 259)
 *
 * Send snapshot:
 * .                                       (ino 256)
 * |---- a/                                (ino 258)
 * |---- x/                                (ino 259)
 *       |---- y/                          (ino 257)
 *             |----- file                 (ino 260)
 *
 * Here we can not rename 258 from 'b' to 'a' without the rename of inode 257
 * from 'a' to 'x/y' happening first, which in turn depends on the rename of
 * inode 259 from 'c' to 'x'. So the order of rename commands the send stream
 * must issue is:
 *
 * 1 - rename 259 from 'c' to 'x'
 * 2 - rename 257 from 'a' to 'x/y'
 * 3 - rename 258 from 'b' to 'a'
 *
 * Returns 1 if the rename of sctx->cur_ino needs to be delayed, 0 if it can
 * be done right away and < 0 on error.
 */
static int wait_for_dest_dir_move(struct send_ctx *sctx,
				  struct recorded_ref *parent_ref,
				  const bool is_orphan)
{
	struct btrfs_fs_info *fs_info = sctx->parent_root->fs_info;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key di_key;
	struct btrfs_dir_item *di;
	u64 left_gen;
	u64 right_gen;
	int ret = 0;
	struct waiting_dir_move *wdm;

	if (RB_EMPTY_ROOT(&sctx->waiting_dir_moves))
		return 0;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = parent_ref->dir;
	key.type = BTRFS_DIR_ITEM_KEY;
	key.offset = btrfs_name_hash(parent_ref->name, parent_ref->name_len);

	ret = btrfs_search_slot(NULL, sctx->parent_root, &key, path, 0, 0);
	if (ret < 0) {
		goto out;
	} else if (ret > 0) {
		ret = 0;
		goto out;
	}

	di = btrfs_match_dir_item_name(fs_info, path, parent_ref->name,
				       parent_ref->name_len);
	if (!di) {
		ret = 0;
		goto out;
	}
	/*
	 * di_key.objectid has the number of the inode that has a dentry in the
	 * parent directory with the same name that sctx->cur_ino is being
	 * renamed to. We need to check if that inode is in the send root as
	 * well and if it is currently marked as an inode with a pending rename,
	 * if it is, we need to delay the rename of sctx->cur_ino as well, so
	 * that it happens after that other inode is renamed.
	 */
	btrfs_dir_item_key_to_cpu(path->nodes[0], di, &di_key);
	if (di_key.type != BTRFS_INODE_ITEM_KEY) {
		ret = 0;
		goto out;
	}

	ret = get_inode_info(sctx->parent_root, di_key.objectid, NULL,
			     &left_gen, NULL, NULL, NULL, NULL);
	if (ret < 0)
		goto out;
	ret = get_inode_info(sctx->send_root, di_key.objectid, NULL,
			     &right_gen, NULL, NULL, NULL, NULL);
	if (ret < 0) {
		if (ret == -ENOENT)
			ret = 0;
		goto out;
	}

	/* Different inode, no need to delay the rename of sctx->cur_ino */
	if (right_gen != left_gen) {
		ret = 0;
		goto out;
	}

	wdm = get_waiting_dir_move(sctx, di_key.objectid);
	if (wdm && !wdm->orphanized) {
		ret = add_pending_dir_move(sctx,
					   sctx->cur_ino,
					   sctx->cur_inode_gen,
					   di_key.objectid,
					   &sctx->new_refs,
					   &sctx->deleted_refs,
					   is_orphan);
		if (!ret)
			ret = 1;
	}
out:
	btrfs_free_path(path);
	return ret;
}
/*
 * Check if inode ino2, or any of its ancestors, is inode ino1.
 * Return 1 if true, 0 if false and < 0 on error.
 */
static int check_ino_in_path(struct btrfs_root *root,
			     const u64 ino1,
			     const u64 ino1_gen,
			     const u64 ino2,
			     const u64 ino2_gen,
			     struct fs_path *fs_path)
{
	u64 ino = ino2;

	if (ino1 == ino2)
		return ino1_gen == ino2_gen;

	while (ino > BTRFS_FIRST_FREE_OBJECTID) {
		u64 parent;
		u64 parent_gen;
		int ret;

		fs_path_reset(fs_path);
		ret = get_first_ref(root, ino, &parent, &parent_gen, fs_path);
		if (ret < 0)
			return ret;
		if (parent == ino1)
			return parent_gen == ino1_gen;
		ino = parent;
	}
	return 0;
}
/*
 * Check if ino ino1 is an ancestor of inode ino2 in the given root for any
 * possible path (in case ino2 is not a directory and has multiple hard links).
 * Return 1 if true, 0 if false and < 0 on error.
 */
static int is_ancestor(struct btrfs_root *root,
		       const u64 ino1,
		       const u64 ino1_gen,
		       const u64 ino2,
		       struct fs_path *fs_path)
{
	bool free_fs_path = false;
	int ret = 0;
	struct btrfs_path *path = NULL;
	struct btrfs_key key;

	if (!fs_path) {
		fs_path = fs_path_alloc();
		if (!fs_path)
			return -ENOMEM;
		free_fs_path = true;
	}

	path = alloc_path_for_send();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	key.objectid = ino2;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (true) {
		struct extent_buffer *leaf = path->nodes[0];
		int slot = path->slots[0];
		u32 cur_offset = 0;
		u32 item_size;

		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			if (ret > 0)
				break;
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid != ino2)
			break;
		if (key.type != BTRFS_INODE_REF_KEY &&
		    key.type != BTRFS_INODE_EXTREF_KEY)
			break;

		item_size = btrfs_item_size_nr(leaf, slot);
		while (cur_offset < item_size) {
			u64 parent;
			u64 parent_gen;

			if (key.type == BTRFS_INODE_EXTREF_KEY) {
				unsigned long ptr;
				struct btrfs_inode_extref *extref;

				ptr = btrfs_item_ptr_offset(leaf, slot);
				extref = (struct btrfs_inode_extref *)
					(ptr + cur_offset);
				parent = btrfs_inode_extref_parent(leaf,
								   extref);
				cur_offset += sizeof(*extref);
				cur_offset += btrfs_inode_extref_name_len(leaf,
								  extref);
			} else {
				parent = key.offset;
				cur_offset = item_size;
			}

			ret = get_inode_info(root, parent, NULL, &parent_gen,
					     NULL, NULL, NULL, NULL);
			if (ret < 0)
				goto out;
			ret = check_ino_in_path(root, ino1, ino1_gen,
						parent, parent_gen, fs_path);
			if (ret)
				goto out;
		}
		path->slots[0]++;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	if (free_fs_path)
		fs_path_free(fs_path);
	return ret;
}
static int wait_for_parent_move(struct send_ctx *sctx,
				struct recorded_ref *parent_ref,
				const bool is_orphan)
{
	int ret = 0;
	u64 ino = parent_ref->dir;
	u64 ino_gen = parent_ref->dir_gen;
	u64 parent_ino_before, parent_ino_after;
	struct fs_path *path_before = NULL;
	struct fs_path *path_after = NULL;
	int len1, len2;

	path_after = fs_path_alloc();
	path_before = fs_path_alloc();
	if (!path_after || !path_before) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Our current directory inode may not yet be renamed/moved because some
	 * ancestor (immediate or not) has to be renamed/moved first. So find if
	 * such ancestor exists and make sure our own rename/move happens after
	 * that ancestor is processed to avoid path build infinite loops (done
	 * at get_cur_path()).
	 */
	while (ino > BTRFS_FIRST_FREE_OBJECTID) {
		u64 parent_ino_after_gen;

		if (is_waiting_for_move(sctx, ino)) {
			/*
			 * If the current inode is an ancestor of ino in the
			 * parent root, we need to delay the rename of the
			 * current inode, otherwise don't delay the rename
			 * because we can end up with a circular dependency
			 * of renames, resulting in some directories never
			 * getting the respective rename operations issued in
			 * the send stream or getting into infinite path build
			 * loops.
			 */
			ret = is_ancestor(sctx->parent_root,
					  sctx->cur_ino, sctx->cur_inode_gen,
					  ino, path_before);
			if (ret)
				break;
		}

		fs_path_reset(path_before);
		fs_path_reset(path_after);

		ret = get_first_ref(sctx->send_root, ino, &parent_ino_after,
				    &parent_ino_after_gen, path_after);
		if (ret < 0)
			goto out;
		ret = get_first_ref(sctx->parent_root, ino, &parent_ino_before,
				    NULL, path_before);
		if (ret < 0 && ret != -ENOENT) {
			goto out;
		} else if (ret == -ENOENT) {
			ret = 0;
			break;
		}

		len1 = fs_path_len(path_before);
		len2 = fs_path_len(path_after);
		if (ino > sctx->cur_ino &&
		    (parent_ino_before != parent_ino_after || len1 != len2 ||
		     memcmp(path_before->start, path_after->start, len1))) {
			u64 parent_ino_gen;

			ret = get_inode_info(sctx->parent_root, ino, NULL,
					     &parent_ino_gen, NULL, NULL, NULL,
					     NULL);
			if (ret < 0)
				goto out;
			if (ino_gen == parent_ino_gen) {
				ret = 1;
				break;
			}
		}
		ino = parent_ino_after;
		ino_gen = parent_ino_after_gen;
	}

out:
	fs_path_free(path_before);
	fs_path_free(path_after);

	if (ret == 1) {
		ret = add_pending_dir_move(sctx,
					   sctx->cur_ino,
					   sctx->cur_inode_gen,
					   ino,
					   &sctx->new_refs,
					   &sctx->deleted_refs,
					   is_orphan);
		if (!ret)
			ret = 1;
	}

	return ret;
}
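/*
 * Condensed decision sketch (hypothetical helper, not in the original
 * file): whether the rename of the current directory must be deferred.
 * It mirrors the order used by process_recorded_refs(): the destination
 * name check runs first and, only when it does not delay the rename, the
 * ancestor check runs. Both callees queue the pending move themselves
 * when they return 1.
 */
static __maybe_unused int rename_must_wait(struct send_ctx *sctx,
					   struct recorded_ref *ref,
					   bool is_orphan)
{
	int ret = wait_for_dest_dir_move(sctx, ref, is_orphan);

	if (ret)
		return ret;	/* 1: delayed, < 0: error */
	return wait_for_parent_move(sctx, ref, is_orphan);
}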
static int update_ref_path(struct send_ctx *sctx, struct recorded_ref *ref)
{
	int ret;
	struct fs_path *new_path;

	/*
	 * Our reference's name member points to its full_path member string, so
	 * we use here a new path.
	 */
	new_path = fs_path_alloc();
	if (!new_path)
		return -ENOMEM;

	ret = get_cur_path(sctx, ref->dir, ref->dir_gen, new_path);
	if (ret < 0) {
		fs_path_free(new_path);
		return ret;
	}
	ret = fs_path_add(new_path, ref->name, ref->name_len);
	if (ret < 0) {
		fs_path_free(new_path);
		return ret;
	}

	fs_path_free(ref->full_path);
	set_ref_path(ref, new_path);

	return 0;
}
/*
 * This does all the move/link/unlink/rmdir magic.
 */
static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
{
	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
	int ret = 0;
	struct recorded_ref *cur;
	struct recorded_ref *cur2;
	struct list_head check_dirs;
	struct fs_path *valid_path = NULL;
	u64 ow_inode = 0;
	u64 ow_gen;
	u64 ow_mode;
	int did_overwrite = 0;
	int is_orphan = 0;
	u64 last_dir_ino_rm = 0;
	bool can_rename = true;
	bool orphanized_dir = false;
	bool orphanized_ancestor = false;

	btrfs_debug(fs_info, "process_recorded_refs %llu", sctx->cur_ino);

	/*
	 * This should never happen as the root dir always has the same ref
	 * which is always '..'
	 */
	BUG_ON(sctx->cur_ino <= BTRFS_FIRST_FREE_OBJECTID);
	INIT_LIST_HEAD(&check_dirs);

	valid_path = fs_path_alloc();
	if (!valid_path) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * First, check if the first ref of the current inode was overwritten
	 * before. If yes, we know that the current inode was already orphanized
	 * and thus use the orphan name. If not, we can use get_cur_path to
	 * get the path of the first ref as it would like while receiving at
	 * this point in time.
	 * New inodes are always orphan at the beginning, so force to use the
	 * orphan name in this case.
	 * The first ref is stored in valid_path and will be updated if it
	 * gets moved around.
	 */
	if (!sctx->cur_inode_new) {
		ret = did_overwrite_first_ref(sctx, sctx->cur_ino,
					      sctx->cur_inode_gen);
		if (ret < 0)
			goto out;
		if (ret)
			did_overwrite = 1;
	}
	if (sctx->cur_inode_new || did_overwrite) {
		ret = gen_unique_name(sctx, sctx->cur_ino,
				      sctx->cur_inode_gen, valid_path);
		if (ret < 0)
			goto out;
		is_orphan = 1;
	} else {
		ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen,
				   valid_path);
		if (ret < 0)
			goto out;
	}

	list_for_each_entry(cur, &sctx->new_refs, list) {
		/*
		 * We may have refs where the parent directory does not exist
		 * yet. This happens if the parent directories inum is higher
		 * than the current inum. To handle this case, we create the
		 * parent directory out of order. But we need to check if this
		 * did already happen before due to other refs in the same dir.
		 */
		ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
		if (ret < 0)
			goto out;
		if (ret == inode_state_will_create) {
			ret = 0;
			/*
			 * First check if any of the current inodes refs did
			 * already create the dir.
			 */
			list_for_each_entry(cur2, &sctx->new_refs, list) {
				if (cur == cur2)
					break;
				if (cur2->dir == cur->dir) {
					ret = 1;
					break;
				}
			}

			/*
			 * If that did not happen, check if a previous inode
			 * did already create the dir.
			 */
			if (!ret)
				ret = did_create_dir(sctx, cur->dir);
			if (ret < 0)
				goto out;
			if (!ret) {
				ret = send_create_inode(sctx, cur->dir);
				if (ret < 0)
					goto out;
			}
		}

		/*
		 * Check if this new ref would overwrite the first ref of
		 * another unprocessed inode. If yes, orphanize the
		 * overwritten inode. If we find an overwritten ref that is
		 * not the first ref, simply unlink it.
		 */
		ret = will_overwrite_ref(sctx, cur->dir, cur->dir_gen,
					 cur->name, cur->name_len,
					 &ow_inode, &ow_gen, &ow_mode);
		if (ret < 0)
			goto out;
		if (ret) {
			ret = is_first_ref(sctx->parent_root,
					   ow_inode, cur->dir, cur->name,
					   cur->name_len);
			if (ret < 0)
				goto out;
			if (ret) {
				struct name_cache_entry *nce;
				struct waiting_dir_move *wdm;

				ret = orphanize_inode(sctx, ow_inode, ow_gen,
						      cur->full_path);
				if (ret < 0)
					goto out;
				if (S_ISDIR(ow_mode))
					orphanized_dir = true;

				/*
				 * If ow_inode has its rename operation delayed
				 * make sure that its orphanized name is used in
				 * the source path when performing its rename
				 * operation.
				 */
				if (is_waiting_for_move(sctx, ow_inode)) {
					wdm = get_waiting_dir_move(sctx,
								   ow_inode);
					ASSERT(wdm);
					wdm->orphanized = true;
				}

				/*
				 * Make sure we clear our orphanized inode's
				 * name from the name cache. This is because the
				 * inode ow_inode might be an ancestor of some
				 * other inode that will be orphanized as well
				 * later and has an inode number greater than
				 * sctx->send_progress. We need to prevent
				 * future name lookups from using the old name
				 * and get instead the orphan name.
				 */
				nce = name_cache_search(sctx, ow_inode, ow_gen);
				if (nce) {
					name_cache_delete(sctx, nce);
					kfree(nce);
				}

				/*
				 * ow_inode might currently be an ancestor of
				 * cur_ino, therefore compute valid_path (the
				 * current path of cur_ino) again because it
				 * might contain the pre-orphanization name of
				 * ow_inode, which is no longer valid.
				 */
				ret = is_ancestor(sctx->parent_root,
						  ow_inode, ow_gen,
						  sctx->cur_ino, NULL);
				if (ret > 0) {
					orphanized_ancestor = true;
					fs_path_reset(valid_path);
					ret = get_cur_path(sctx, sctx->cur_ino,
							   sctx->cur_inode_gen,
							   valid_path);
				}
				if (ret < 0)
					goto out;
			} else {
				ret = send_unlink(sctx, cur->full_path);
				if (ret < 0)
					goto out;
			}
		}

		if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root) {
			ret = wait_for_dest_dir_move(sctx, cur, is_orphan);
			if (ret < 0)
				goto out;
			if (ret == 1) {
				can_rename = false;
				*pending_move = 1;
			}
		}

		if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root &&
		    can_rename) {
			ret = wait_for_parent_move(sctx, cur, is_orphan);
			if (ret < 0)
				goto out;
			if (ret == 1) {
				can_rename = false;
				*pending_move = 1;
			}
		}

		/*
		 * link/move the ref to the new place. If we have an orphan
		 * inode, move it and update valid_path. If not, link or move
		 * it depending on the inode mode.
		 */
		if (is_orphan && can_rename) {
			ret = send_rename(sctx, valid_path, cur->full_path);
			if (ret < 0)
				goto out;
			is_orphan = 0;
			ret = fs_path_copy(valid_path, cur->full_path);
			if (ret < 0)
				goto out;
		} else if (can_rename) {
			if (S_ISDIR(sctx->cur_inode_mode)) {
				/*
				 * Dirs can't be linked, so move it. For moved
				 * dirs, we always have one new and one deleted
				 * ref. The deleted ref is ignored later.
				 */
				ret = send_rename(sctx, valid_path,
						  cur->full_path);
				if (!ret)
					ret = fs_path_copy(valid_path,
							   cur->full_path);
				if (ret < 0)
					goto out;
			} else {
				/*
				 * We might have previously orphanized an inode
				 * which is an ancestor of our current inode,
				 * so our reference's full path, which was
				 * computed before any such orphanizations, must
				 * be updated.
				 */
				if (orphanized_dir) {
					ret = update_ref_path(sctx, cur);
					if (ret < 0)
						goto out;
				}
				ret = send_link(sctx, cur->full_path,
						valid_path);
				if (ret < 0)
					goto out;
			}
		}
		ret = dup_ref(cur, &check_dirs);
		if (ret < 0)
			goto out;
	}

	if (S_ISDIR(sctx->cur_inode_mode) && sctx->cur_inode_deleted) {
		/*
		 * Check if we can already rmdir the directory. If not,
		 * orphanize it. For every dir item inside that gets deleted
		 * later, we do this check again and rmdir it then if possible.
		 * See the use of check_dirs for more details.
		 */
		ret = can_rmdir(sctx, sctx->cur_ino, sctx->cur_inode_gen,
				sctx->cur_ino);
		if (ret < 0)
			goto out;
		if (ret) {
			ret = send_rmdir(sctx, valid_path);
			if (ret < 0)
				goto out;
		} else if (!is_orphan) {
			ret = orphanize_inode(sctx, sctx->cur_ino,
					      sctx->cur_inode_gen, valid_path);
			if (ret < 0)
				goto out;
			is_orphan = 1;
		}

		list_for_each_entry(cur, &sctx->deleted_refs, list) {
			ret = dup_ref(cur, &check_dirs);
			if (ret < 0)
				goto out;
		}
	} else if (S_ISDIR(sctx->cur_inode_mode) &&
		   !list_empty(&sctx->deleted_refs)) {
		/*
		 * We have a moved dir. Add the old parent to check_dirs
		 */
		cur = list_entry(sctx->deleted_refs.next, struct recorded_ref,
				 list);
		ret = dup_ref(cur, &check_dirs);
		if (ret < 0)
			goto out;
	} else if (!S_ISDIR(sctx->cur_inode_mode)) {
		/*
		 * We have a non dir inode. Go through all deleted refs and
		 * unlink them if they were not already overwritten by other
		 * inodes.
		 */
		list_for_each_entry(cur, &sctx->deleted_refs, list) {
			ret = did_overwrite_ref(sctx, cur->dir, cur->dir_gen,
						sctx->cur_ino, sctx->cur_inode_gen,
						cur->name, cur->name_len);
			if (ret < 0)
				goto out;
			if (!ret) {
				/*
				 * If we orphanized any ancestor before, we need
				 * to recompute the full path for deleted names,
				 * since any such path was computed before we
				 * processed any references and orphanized any
				 * ancestor inode.
				 */
				if (orphanized_ancestor) {
					ret = update_ref_path(sctx, cur);
					if (ret < 0)
						goto out;
				}
				ret = send_unlink(sctx, cur->full_path);
				if (ret < 0)
					goto out;
			}
			ret = dup_ref(cur, &check_dirs);
			if (ret < 0)
				goto out;
		}
		/*
		 * If the inode is still orphan, unlink the orphan. This may
		 * happen when a previous inode did overwrite the first ref
		 * of this inode and no new refs were added for the current
		 * inode. Unlinking does not mean that the inode is deleted in
		 * all cases. There may still be links to this inode in other
		 * places.
		 */
		if (is_orphan) {
			ret = send_unlink(sctx, valid_path);
			if (ret < 0)
				goto out;
		}
	}

	/*
	 * We did collect all parent dirs where cur_inode was once located. We
	 * now go through all these dirs and check if they are pending for
	 * deletion and if it's finally possible to perform the rmdir now.
	 * We also update the inode stats of the parent dirs here.
	 */
	list_for_each_entry(cur, &check_dirs, list) {
		/*
		 * In case we had refs into dirs that were not processed yet,
		 * we don't need to do the utime and rmdir logic for these dirs.
		 * The dir will be processed later.
		 */
		if (cur->dir > sctx->cur_ino)
			continue;

		ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
		if (ret < 0)
			goto out;

		if (ret == inode_state_did_create ||
		    ret == inode_state_no_change) {
			/* TODO delayed utimes */
			ret = send_utimes(sctx, cur->dir, cur->dir_gen);
			if (ret < 0)
				goto out;
		} else if (ret == inode_state_did_delete &&
			   cur->dir != last_dir_ino_rm) {
			ret = can_rmdir(sctx, cur->dir, cur->dir_gen,
					sctx->cur_ino);
			if (ret < 0)
				goto out;
			if (ret) {
				ret = get_cur_path(sctx, cur->dir,
						   cur->dir_gen, valid_path);
				if (ret < 0)
					goto out;
				ret = send_rmdir(sctx, valid_path);
				if (ret < 0)
					goto out;
				last_dir_ino_rm = cur->dir;
			}
		}
	}

	ret = 0;

out:
	__free_recorded_refs(&check_dirs);
	free_recorded_refs(sctx);
	fs_path_free(valid_path);
	return ret;
}
static int record_ref(struct btrfs_root *root, u64 dir, struct fs_path *name,
		      void *ctx, struct list_head *refs)
{
	int ret = 0;
	struct send_ctx *sctx = ctx;
	struct fs_path *p;
	u64 gen;

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	ret = get_inode_info(root, dir, NULL, &gen, NULL, NULL,
			     NULL, NULL);
	if (ret < 0)
		goto out;

	ret = get_cur_path(sctx, dir, gen, p);
	if (ret < 0)
		goto out;
	ret = fs_path_add_path(p, name);
	if (ret < 0)
		goto out;

	ret = __record_ref(refs, dir, gen, p);

out:
	if (ret)
		fs_path_free(p);
	return ret;
}
, u64 dir
, int index
,
4203 struct fs_path
*name
,
4206 struct send_ctx
*sctx
= ctx
;
4207 return record_ref(sctx
->send_root
, dir
, name
, ctx
, &sctx
->new_refs
);
4211 static int __record_deleted_ref(int num
, u64 dir
, int index
,
4212 struct fs_path
*name
,
4215 struct send_ctx
*sctx
= ctx
;
4216 return record_ref(sctx
->parent_root
, dir
, name
, ctx
,
4217 &sctx
->deleted_refs
);
static int record_new_ref(struct send_ctx *sctx)
{
	int ret;

	ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
				sctx->cmp_key, 0, __record_new_ref, sctx);
	if (ret < 0)
		return ret;

	return 0;
}

static int record_deleted_ref(struct send_ctx *sctx)
{
	int ret;

	ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
				sctx->cmp_key, 0, __record_deleted_ref, sctx);
	if (ret < 0)
		return ret;

	return 0;
}
{
4251 struct btrfs_root
*root
;
4252 struct fs_path
*name
;
4256 static int __find_iref(int num
, u64 dir
, int index
,
4257 struct fs_path
*name
,
4260 struct find_ref_ctx
*ctx
= ctx_
;
4264 if (dir
== ctx
->dir
&& fs_path_len(name
) == fs_path_len(ctx
->name
) &&
4265 strncmp(name
->start
, ctx
->name
->start
, fs_path_len(name
)) == 0) {
4267 * To avoid doing extra lookups we'll only do this if everything
4270 ret
= get_inode_info(ctx
->root
, dir
, NULL
, &dir_gen
, NULL
,
4274 if (dir_gen
!= ctx
->dir_gen
)
4276 ctx
->found_idx
= num
;
4282 static int find_iref(struct btrfs_root
*root
,
4283 struct btrfs_path
*path
,
4284 struct btrfs_key
*key
,
4285 u64 dir
, u64 dir_gen
, struct fs_path
*name
)
4288 struct find_ref_ctx ctx
;
4292 ctx
.dir_gen
= dir_gen
;
4296 ret
= iterate_inode_ref(root
, path
, key
, 0, __find_iref
, &ctx
);
4300 if (ctx
.found_idx
== -1)
4303 return ctx
.found_idx
;
static int __record_changed_new_ref(int num, u64 dir, int index,
				    struct fs_path *name,
				    void *ctx)
{
	u64 dir_gen;
	int ret;
	struct send_ctx *sctx = ctx;

	ret = get_inode_info(sctx->send_root, dir, NULL, &dir_gen, NULL,
			     NULL, NULL, NULL);
	if (ret)
		return ret;

	ret = find_iref(sctx->parent_root, sctx->right_path,
			sctx->cmp_key, dir, dir_gen, name);
	if (ret == -ENOENT)
		ret = __record_new_ref(num, dir, index, name, sctx);
	else if (ret > 0)
		ret = 0;

	return ret;
}

static int __record_changed_deleted_ref(int num, u64 dir, int index,
					struct fs_path *name,
					void *ctx)
{
	u64 dir_gen;
	int ret;
	struct send_ctx *sctx = ctx;

	ret = get_inode_info(sctx->parent_root, dir, NULL, &dir_gen, NULL,
			     NULL, NULL, NULL);
	if (ret)
		return ret;

	ret = find_iref(sctx->send_root, sctx->left_path, sctx->cmp_key,
			dir, dir_gen, name);
	if (ret == -ENOENT)
		ret = __record_deleted_ref(num, dir, index, name, sctx);
	else if (ret > 0)
		ret = 0;

	return ret;
}
static int record_changed_ref(struct send_ctx *sctx)
{
	int ret = 0;

	ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
				sctx->cmp_key, 0, __record_changed_new_ref, sctx);
	if (ret < 0)
		return ret;
	ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
				sctx->cmp_key, 0, __record_changed_deleted_ref, sctx);
	if (ret < 0)
		return ret;

	return 0;
}
/*
 * Record and process all refs at once. Needed when an inode changes the
 * generation number, which means that it was deleted and recreated.
 */
static int process_all_refs(struct send_ctx *sctx,
			    enum btrfs_compare_tree_result cmd)
{
	int ret;
	struct btrfs_root *root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	int slot;
	iterate_inode_ref_t cb;
	int pending_move = 0;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	if (cmd == BTRFS_COMPARE_TREE_NEW) {
		root = sctx->send_root;
		cb = __record_new_ref;
	} else if (cmd == BTRFS_COMPARE_TREE_DELETED) {
		root = sctx->parent_root;
		cb = __record_deleted_ref;
	} else {
		btrfs_err(sctx->send_root->fs_info,
			  "Wrong command %d in process_all_refs", cmd);
		ret = -EINVAL;
		goto out;
	}

	key.objectid = sctx->cmp_key->objectid;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		eb = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(eb)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			else if (ret > 0)
				break;
			continue;
		}

		btrfs_item_key_to_cpu(eb, &found_key, slot);

		if (found_key.objectid != key.objectid ||
		    (found_key.type != BTRFS_INODE_REF_KEY &&
		     found_key.type != BTRFS_INODE_EXTREF_KEY))
			break;

		ret = iterate_inode_ref(root, path, &found_key, 0, cb, sctx);
		if (ret < 0)
			goto out;

		path->slots[0]++;
	}
	btrfs_release_path(path);

	/*
	 * We don't actually care about pending_move as we are simply
	 * re-creating this inode and will be rename'ing it into place once we
	 * rename the parent directory.
	 */
	ret = process_recorded_refs(sctx, &pending_move);
out:
	btrfs_free_path(path);
	return ret;
}
static int send_set_xattr(struct send_ctx *sctx,
			  struct fs_path *path,
			  const char *name, int name_len,
			  const char *data, int data_len)
{
	int ret = 0;

	ret = begin_cmd(sctx, BTRFS_SEND_C_SET_XATTR);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
	TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);
	TLV_PUT(sctx, BTRFS_SEND_A_XATTR_DATA, data, data_len);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}

static int send_remove_xattr(struct send_ctx *sctx,
			     struct fs_path *path,
			     const char *name, int name_len)
{
	int ret = 0;

	ret = begin_cmd(sctx, BTRFS_SEND_C_REMOVE_XATTR);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
	TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}
static int __process_new_xattr(int num, struct btrfs_key *di_key,
			       const char *name, int name_len,
			       const char *data, int data_len,
			       u8 type, void *ctx)
{
	int ret;
	struct send_ctx *sctx = ctx;
	struct fs_path *p;
	struct posix_acl_xattr_header dummy_acl;

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	/*
	 * This hack is needed because empty acls are stored as zero byte
	 * data in xattrs. Problem with that is, that receiving these zero byte
	 * acls will fail later. To fix this, we send a dummy acl list that
	 * only contains the version number and no entries.
	 */
	if (!strncmp(name, XATTR_NAME_POSIX_ACL_ACCESS, name_len) ||
	    !strncmp(name, XATTR_NAME_POSIX_ACL_DEFAULT, name_len)) {
		if (data_len == 0) {
			dummy_acl.a_version =
					cpu_to_le32(POSIX_ACL_XATTR_VERSION);
			data = (char *)&dummy_acl;
			data_len = sizeof(dummy_acl);
		}
	}

	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
	if (ret < 0)
		goto out;

	ret = send_set_xattr(sctx, p, name, name_len, data, data_len);

out:
	fs_path_free(p);
	return ret;
}

static int __process_deleted_xattr(int num, struct btrfs_key *di_key,
				   const char *name, int name_len,
				   const char *data, int data_len,
				   u8 type, void *ctx)
{
	int ret;
	struct send_ctx *sctx = ctx;
	struct fs_path *p;

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
	if (ret < 0)
		goto out;

	ret = send_remove_xattr(sctx, p, name, name_len);

out:
	fs_path_free(p);
	return ret;
}

static int process_new_xattr(struct send_ctx *sctx)
{
	int ret = 0;

	ret = iterate_dir_item(sctx->send_root, sctx->left_path,
			       __process_new_xattr, sctx);

	return ret;
}

static int process_deleted_xattr(struct send_ctx *sctx)
{
	return iterate_dir_item(sctx->parent_root, sctx->right_path,
				__process_deleted_xattr, sctx);
}
struct find_xattr_ctx {
	const char *name;
	int name_len;
	int found_idx;
	char *found_data;
	int found_data_len;
};

static int __find_xattr(int num, struct btrfs_key *di_key,
			const char *name, int name_len,
			const char *data, int data_len,
			u8 type, void *vctx)
{
	struct find_xattr_ctx *ctx = vctx;

	if (name_len == ctx->name_len &&
	    strncmp(name, ctx->name, name_len) == 0) {
		ctx->found_idx = num;
		ctx->found_data_len = data_len;
		ctx->found_data = kmemdup(data, data_len, GFP_KERNEL);
		if (!ctx->found_data)
			return -ENOMEM;
		return 1;
	}
	return 0;
}

static int find_xattr(struct btrfs_root *root,
		      struct btrfs_path *path,
		      struct btrfs_key *key,
		      const char *name, int name_len,
		      char **data, int *data_len)
{
	int ret;
	struct find_xattr_ctx ctx;

	ctx.name = name;
	ctx.name_len = name_len;
	ctx.found_idx = -1;
	ctx.found_data = NULL;
	ctx.found_data_len = 0;

	ret = iterate_dir_item(root, path, __find_xattr, &ctx);
	if (ret < 0)
		return ret;

	if (ctx.found_idx == -1)
		return -ENOENT;
	if (data) {
		*data = ctx.found_data;
		*data_len = ctx.found_data_len;
	} else {
		kfree(ctx.found_data);
	}
	return ctx.found_idx;
}
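/*
 * Usage sketch for find_xattr() (illustrative; this helper is
 * hypothetical): a caller that only needs an existence test passes NULL
 * for the data pointers, which skips the kmemdup() copy; otherwise the
 * caller owns the returned buffer and must kfree() it.
 */
static __maybe_unused int xattr_exists(struct btrfs_root *root,
				       struct btrfs_path *path,
				       struct btrfs_key *key,
				       const char *name, int name_len)
{
	int ret = find_xattr(root, path, key, name, name_len, NULL, NULL);

	if (ret == -ENOENT)
		return 0;
	return ret < 0 ? ret : 1;
}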
static int __process_changed_new_xattr(int num, struct btrfs_key *di_key,
				       const char *name, int name_len,
				       const char *data, int data_len,
				       u8 type, void *ctx)
{
	int ret;
	struct send_ctx *sctx = ctx;
	char *found_data = NULL;
	int found_data_len = 0;

	ret = find_xattr(sctx->parent_root, sctx->right_path,
			 sctx->cmp_key, name, name_len, &found_data,
			 &found_data_len);
	if (ret == -ENOENT) {
		ret = __process_new_xattr(num, di_key, name, name_len, data,
					  data_len, type, ctx);
	} else if (ret >= 0) {
		if (data_len != found_data_len ||
		    memcmp(data, found_data, data_len)) {
			ret = __process_new_xattr(num, di_key, name, name_len,
						  data, data_len, type, ctx);
		} else {
			ret = 0;
		}
	}

	kfree(found_data);
	return ret;
}

static int __process_changed_deleted_xattr(int num, struct btrfs_key *di_key,
					   const char *name, int name_len,
					   const char *data, int data_len,
					   u8 type, void *ctx)
{
	int ret;
	struct send_ctx *sctx = ctx;

	ret = find_xattr(sctx->send_root, sctx->left_path, sctx->cmp_key,
			 name, name_len, NULL, NULL);
	if (ret == -ENOENT)
		ret = __process_deleted_xattr(num, di_key, name, name_len, data,
					      data_len, type, ctx);
	else if (ret >= 0)
		ret = 0;

	return ret;
}
static int process_changed_xattr(struct send_ctx *sctx)
{
	int ret = 0;

	ret = iterate_dir_item(sctx->send_root, sctx->left_path,
			       __process_changed_new_xattr, sctx);
	if (ret < 0)
		return ret;
	ret = iterate_dir_item(sctx->parent_root, sctx->right_path,
			       __process_changed_deleted_xattr, sctx);

	return ret;
}
static int process_all_new_xattrs(struct send_ctx *sctx)
{
	int ret;
	struct btrfs_root *root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	int slot;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	root = sctx->send_root;

	key.objectid = sctx->cmp_key->objectid;
	key.type = BTRFS_XATTR_ITEM_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		eb = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(eb)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0) {
				goto out;
			} else if (ret > 0) {
				ret = 0;
				break;
			}
			continue;
		}

		btrfs_item_key_to_cpu(eb, &found_key, slot);
		if (found_key.objectid != key.objectid ||
		    found_key.type != key.type) {
			ret = 0;
			goto out;
		}

		ret = iterate_dir_item(root, path, __process_new_xattr, sctx);
		if (ret < 0)
			goto out;

		path->slots[0]++;
	}

out:
	btrfs_free_path(path);
	return ret;
}
static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len)
{
	struct btrfs_root *root = sctx->send_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct inode *inode;
	struct page *page;
	char *addr;
	struct btrfs_key key;
	pgoff_t index = offset >> PAGE_SHIFT;
	pgoff_t last_index;
	unsigned pg_offset = offset_in_page(offset);
	ssize_t ret = 0;

	key.objectid = sctx->cur_ino;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	inode = btrfs_iget(fs_info->sb, &key, root, NULL);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	if (offset + len > i_size_read(inode)) {
		if (offset > i_size_read(inode))
			len = 0;
		else
			len = i_size_read(inode) - offset;
	}
	if (len == 0)
		goto out;

	last_index = (offset + len - 1) >> PAGE_SHIFT;

	/* initial readahead */
	memset(&sctx->ra, 0, sizeof(struct file_ra_state));
	file_ra_state_init(&sctx->ra, inode->i_mapping);

	while (index <= last_index) {
		unsigned cur_len = min_t(unsigned, len,
					 PAGE_SIZE - pg_offset);

		page = find_lock_page(inode->i_mapping, index);
		if (!page) {
			page_cache_sync_readahead(inode->i_mapping, &sctx->ra,
						  NULL, index, last_index + 1 - index);

			page = find_or_create_page(inode->i_mapping, index,
						   GFP_KERNEL);
			if (!page) {
				ret = -ENOMEM;
				break;
			}
		}

		if (PageReadahead(page)) {
			page_cache_async_readahead(inode->i_mapping, &sctx->ra,
						   NULL, page, index, last_index + 1 - index);
		}

		if (!PageUptodate(page)) {
			btrfs_readpage(NULL, page);
			lock_page(page);
			if (!PageUptodate(page)) {
				unlock_page(page);
				put_page(page);
				ret = -EIO;
				break;
			}
		}

		addr = kmap(page);
		memcpy(sctx->read_buf + ret, addr + pg_offset, cur_len);
		kunmap(page);
		unlock_page(page);
		put_page(page);
		index++;
		pg_offset = 0;
		len -= cur_len;
		ret += cur_len;
	}
out:
	iput(inode);
	return ret;
}
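/*
 * Page arithmetic used by fill_read_buf() above (illustrative sketch; the
 * helper is hypothetical): a file offset splits into a page cache index
 * and an offset inside that page, and the last page is derived from the
 * end of the range.
 */
static __maybe_unused void file_offset_to_pages(u64 offset, u32 len,
						pgoff_t *index,
						pgoff_t *last_index,
						unsigned *pg_offset)
{
	*index = offset >> PAGE_SHIFT;
	*last_index = (offset + len - 1) >> PAGE_SHIFT;
	*pg_offset = offset_in_page(offset);
}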
/*
 * Read some bytes from the current inode/file and send a write command to
 * user space.
 */
static int send_write(struct send_ctx *sctx, u64 offset, u32 len)
{
	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
	int ret = 0;
	struct fs_path *p;
	ssize_t num_read = 0;

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	btrfs_debug(fs_info, "send_write offset=%llu, len=%d", offset, len);

	num_read = fill_read_buf(sctx, offset, len);
	if (num_read <= 0) {
		if (num_read < 0)
			ret = num_read;
		goto out;
	}

	ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
	if (ret < 0)
		goto out;

	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
	TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, num_read);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	fs_path_free(p);
	if (ret < 0)
		return ret;
	return num_read;
}
/*
 * Send a clone command to user space.
 */
static int send_clone(struct send_ctx *sctx,
		      u64 offset, u32 len,
		      struct clone_root *clone_root)
{
	int ret = 0;
	struct fs_path *p;
	u64 gen;

	btrfs_debug(sctx->send_root->fs_info,
		    "send_clone offset=%llu, len=%d, clone_root=%llu, clone_inode=%llu, clone_offset=%llu",
		    offset, len, clone_root->root->root_key.objectid,
		    clone_root->ino, clone_root->offset);

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	ret = begin_cmd(sctx, BTRFS_SEND_C_CLONE);
	if (ret < 0)
		goto out;

	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
	if (ret < 0)
		goto out;

	TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_LEN, len);
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);

	if (clone_root->root == sctx->send_root) {
		ret = get_inode_info(sctx->send_root, clone_root->ino, NULL,
				     &gen, NULL, NULL, NULL, NULL);
		if (ret < 0)
			goto out;
		ret = get_cur_path(sctx, clone_root->ino, gen, p);
	} else {
		ret = get_inode_path(clone_root->root, clone_root->ino, p);
	}
	if (ret < 0)
		goto out;

	/*
	 * If the parent we're using has a received_uuid set then use that as
	 * our clone source as that is what we will look for when doing a
	 * receive.
	 *
	 * This covers the case that we create a snapshot off of a received
	 * subvolume and then use that as the parent and try to receive on a
	 * different host.
	 */
	if (!btrfs_is_empty_uuid(clone_root->root->root_item.received_uuid))
		TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
			     clone_root->root->root_item.received_uuid);
	else
		TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
			     clone_root->root->root_item.uuid);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
		    le64_to_cpu(clone_root->root->root_item.ctransid));
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_CLONE_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_OFFSET,
		    clone_root->offset);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	fs_path_free(p);
	return ret;
}
/*
 * Send an update extent command to user space.
 */
static int send_update_extent(struct send_ctx *sctx,
			      u64 offset, u32 len)
{
	int ret = 0;
	struct fs_path *p;

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	ret = begin_cmd(sctx, BTRFS_SEND_C_UPDATE_EXTENT);
	if (ret < 0)
		goto out;

	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, len);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	fs_path_free(p);
	return ret;
}
static int send_hole(struct send_ctx *sctx, u64 end)
{
	struct fs_path *p = NULL;
	u64 offset = sctx->cur_inode_last_extent;
	u64 len;
	int ret = 0;

	/*
	 * A hole that starts at EOF or beyond it. Since we do not yet support
	 * fallocate (for extent preallocation and hole punching), sending a
	 * write of zeroes starting at EOF or beyond would later require issuing
	 * a truncate operation which would undo the write and achieve nothing.
	 */
	if (offset >= sctx->cur_inode_size)
		return 0;

	/*
	 * Don't go beyond the inode's i_size due to prealloc extents that start
	 * after it.
	 */
	end = min_t(u64, end, sctx->cur_inode_size);

	if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA)
		return send_update_extent(sctx, offset, end - offset);

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;
	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
	if (ret < 0)
		goto tlv_put_failure;
	memset(sctx->read_buf, 0, BTRFS_SEND_READ_SIZE);
	while (offset < end) {
		len = min_t(u64, end - offset, BTRFS_SEND_READ_SIZE);

		ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
		if (ret < 0)
			break;
		TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
		TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
		TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, len);
		ret = send_cmd(sctx);
		if (ret < 0)
			break;
		offset += len;
	}
	sctx->cur_inode_next_write_offset = offset;
tlv_put_failure:
	fs_path_free(p);
	return ret;
}
static int send_extent_data(struct send_ctx *sctx,
			    const u64 offset,
			    const u64 len)
{
	u64 sent = 0;

	if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA)
		return send_update_extent(sctx, offset, len);

	while (sent < len) {
		u64 size = len - sent;
		int ret;

		if (size > BTRFS_SEND_READ_SIZE)
			size = BTRFS_SEND_READ_SIZE;
		ret = send_write(sctx, offset + sent, size);
		if (ret < 0)
			return ret;
		if (!ret)
			break;
		sent += ret;
	}
	return 0;
}
static int clone_range(struct send_ctx *sctx,
		       struct clone_root *clone_root,
		       const u64 disk_byte,
		       u64 data_offset,
		       u64 offset,
		       u64 len)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;
	u64 clone_src_i_size = 0;

	/*
	 * Prevent cloning from a zero offset with a length matching the sector
	 * size because in some scenarios this will make the receiver fail.
	 *
	 * For example, if in the source filesystem the extent at offset 0
	 * has a length of sectorsize and it was written using direct IO, then
	 * it can never be an inline extent (even if compression is enabled).
	 * Then this extent can be cloned in the original filesystem to a non
	 * zero file offset, but it may not be possible to clone in the
	 * destination filesystem because it can be inlined due to compression
	 * on the destination filesystem (as the receiver's write operations are
	 * always done using buffered IO). The same happens when the original
	 * filesystem does not have compression enabled but the destination
	 * filesystem has.
	 */
	if (clone_root->offset == 0 &&
	    len == sctx->send_root->fs_info->sectorsize)
		return send_extent_data(sctx, offset, len);

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	/*
	 * There are inodes that have extents that lie behind its i_size. Don't
	 * accept clones from these extents.
	 */
	ret = __get_inode_info(clone_root->root, path, clone_root->ino,
			       &clone_src_i_size, NULL, NULL, NULL, NULL, NULL);
	btrfs_release_path(path);
	if (ret < 0)
		goto out;

	/*
	 * We can't send a clone operation for the entire range if we find
	 * extent items in the respective range in the source file that
	 * refer to different extents or if we find holes.
	 * So check for that and do a mix of clone and regular write/copy
	 * operations if needed.
	 *
	 * Example:
	 *
	 * mkfs.btrfs -f /dev/sda
	 * mount /dev/sda /mnt
	 * xfs_io -f -c "pwrite -S 0xaa 0K 100K" /mnt/foo
	 * cp --reflink=always /mnt/foo /mnt/bar
	 * xfs_io -c "pwrite -S 0xbb 50K 50K" /mnt/foo
	 * btrfs subvolume snapshot -r /mnt /mnt/snap
	 *
	 * If when we send the snapshot and we are processing file bar (which
	 * has a higher inode number than foo) we blindly send a clone operation
	 * for the [0, 100K[ range from foo to bar, the receiver ends up getting
	 * a file bar that matches the content of file foo - iow, doesn't match
	 * the content from bar in the original filesystem.
	 */
	key.objectid = clone_root->ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = clone_root->offset;
	ret = btrfs_search_slot(NULL, clone_root->root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0 && path->slots[0] > 0) {
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1);
		if (key.objectid == clone_root->ino &&
		    key.type == BTRFS_EXTENT_DATA_KEY)
			path->slots[0]--;
	}

	while (true) {
		struct extent_buffer *leaf = path->nodes[0];
		int slot = path->slots[0];
		struct btrfs_file_extent_item *ei;
		u8 type;
		u64 ext_len;
		u64 clone_len;
		u64 clone_data_offset;

		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(clone_root->root, path);
			if (ret < 0)
				goto out;
			else if (ret > 0)
				break;
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, slot);

		/*
		 * We might have an implicit trailing hole (NO_HOLES feature
		 * enabled). We deal with it after leaving this loop.
		 */
		if (key.objectid != clone_root->ino ||
		    key.type != BTRFS_EXTENT_DATA_KEY)
			break;

		ei = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
		type = btrfs_file_extent_type(leaf, ei);
		if (type == BTRFS_FILE_EXTENT_INLINE) {
			ext_len = btrfs_file_extent_ram_bytes(leaf, ei);
			ext_len = PAGE_ALIGN(ext_len);
		} else {
			ext_len = btrfs_file_extent_num_bytes(leaf, ei);
		}

		if (key.offset + ext_len <= clone_root->offset)
			goto next;

		if (key.offset > clone_root->offset) {
			/* Implicit hole, NO_HOLES feature enabled. */
			u64 hole_len = key.offset - clone_root->offset;

			if (hole_len > len)
				hole_len = len;
			ret = send_extent_data(sctx, offset, hole_len);
			if (ret < 0)
				goto out;

			len -= hole_len;
			if (len == 0)
				break;
			offset += hole_len;
			clone_root->offset += hole_len;
			data_offset += hole_len;
		}

		if (key.offset >= clone_root->offset + len)
			break;

		if (key.offset >= clone_src_i_size)
			break;

		if (key.offset + ext_len > clone_src_i_size)
			ext_len = clone_src_i_size - key.offset;

		clone_data_offset = btrfs_file_extent_offset(leaf, ei);
		if (btrfs_file_extent_disk_bytenr(leaf, ei) == disk_byte) {
			clone_root->offset = key.offset;
			if (clone_data_offset < data_offset &&
			    clone_data_offset + ext_len > data_offset) {
				u64 extent_offset;

				extent_offset = data_offset - clone_data_offset;
				ext_len -= extent_offset;
				clone_data_offset += extent_offset;
				clone_root->offset += extent_offset;
			}
		}

		clone_len = min_t(u64, ext_len, len);

		if (btrfs_file_extent_disk_bytenr(leaf, ei) == disk_byte &&
		    clone_data_offset == data_offset) {
			const u64 src_end = clone_root->offset + clone_len;
			const u64 sectorsize = SZ_64K;

			/*
			 * We can't clone the last block, when its size is not
			 * sector size aligned, into the middle of a file. If we
			 * do so, the receiver will get a failure (-EINVAL) when
			 * trying to clone or will silently corrupt the data in
			 * the destination file if it's on a kernel without the
			 * fix introduced by commit ac765f83f1397646
			 * ("Btrfs: fix data corruption due to cloning of eof
			 * block).
			 *
			 * So issue a clone of the aligned down range plus a
			 * regular write for the eof block, if we hit that case.
			 *
			 * Also, we use the maximum possible sector size, 64K,
			 * because we don't know what's the sector size of the
			 * filesystem that receives the stream, so we have to
			 * assume the largest possible sector size.
			 */
			if (src_end == clone_src_i_size &&
			    !IS_ALIGNED(src_end, sectorsize) &&
			    offset + clone_len < sctx->cur_inode_size) {
				u64 slen;

				slen = ALIGN_DOWN(src_end - clone_root->offset,
						  sectorsize);
				if (slen > 0) {
					ret = send_clone(sctx, offset, slen,
							 clone_root);
					if (ret < 0)
						goto out;
				}
				ret = send_extent_data(sctx, offset + slen,
						       clone_len - slen);
			} else {
				ret = send_clone(sctx, offset, clone_len,
						 clone_root);
			}
		} else {
			ret = send_extent_data(sctx, offset, clone_len);
		}

		if (ret < 0)
			goto out;

		len -= clone_len;
		if (len == 0)
			break;
		offset += clone_len;
		clone_root->offset += clone_len;
		data_offset += clone_len;
next:
		path->slots[0]++;
	}

	if (len > 0)
		ret = send_extent_data(sctx, offset, len);
	else
		ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
static int send_write_or_clone(struct send_ctx *sctx,
			       struct btrfs_path *path,
			       struct btrfs_key *key,
			       struct clone_root *clone_root)
{
	int ret = 0;
	struct btrfs_file_extent_item *ei;
	u64 offset = key->offset;
	u64 len;
	u8 type;
	u64 bs = sctx->send_root->fs_info->sb->s_blocksize;

	ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
			    struct btrfs_file_extent_item);
	type = btrfs_file_extent_type(path->nodes[0], ei);
	if (type == BTRFS_FILE_EXTENT_INLINE) {
		len = btrfs_file_extent_ram_bytes(path->nodes[0], ei);
		/*
		 * it is possible the inline item won't cover the whole page,
		 * but there may be items after this page. Make
		 * sure to send the whole thing
		 */
		len = PAGE_ALIGN(len);
	} else {
		len = btrfs_file_extent_num_bytes(path->nodes[0], ei);
	}

	if (offset >= sctx->cur_inode_size) {
		ret = 0;
		goto out;
	}
	if (offset + len > sctx->cur_inode_size)
		len = sctx->cur_inode_size - offset;
	if (len == 0) {
		ret = 0;
		goto out;
	}

	if (clone_root && IS_ALIGNED(offset + len, bs)) {
		u64 disk_byte;
		u64 data_offset;

		disk_byte = btrfs_file_extent_disk_bytenr(path->nodes[0], ei);
		data_offset = btrfs_file_extent_offset(path->nodes[0], ei);
		ret = clone_range(sctx, clone_root, disk_byte, data_offset,
				  offset, len);
	} else {
		ret = send_extent_data(sctx, offset, len);
	}
	sctx->cur_inode_next_write_offset = offset + len;
out:
	return ret;
}
static int is_extent_unchanged(struct send_ctx *sctx,
			       struct btrfs_path *left_path,
			       struct btrfs_key *ekey)
{
	int ret = 0;
	struct btrfs_key key;
	struct btrfs_path *path = NULL;
	struct extent_buffer *eb;
	int slot;
	struct btrfs_key found_key;
	struct btrfs_file_extent_item *ei;
	u64 left_disknr;
	u64 right_disknr;
	u64 left_offset;
	u64 right_offset;
	u64 left_offset_fixed;
	u64 left_len;
	u64 right_len;
	u64 left_gen;
	u64 right_gen;
	u8 left_type;
	u8 right_type;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	eb = left_path->nodes[0];
	slot = left_path->slots[0];
	ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
	left_type = btrfs_file_extent_type(eb, ei);

	if (left_type != BTRFS_FILE_EXTENT_REG) {
		ret = 0;
		goto out;
	}
	left_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
	left_len = btrfs_file_extent_num_bytes(eb, ei);
	left_offset = btrfs_file_extent_offset(eb, ei);
	left_gen = btrfs_file_extent_generation(eb, ei);

	/*
	 * Following comments will refer to these graphics. L is the left
	 * extents which we are checking at the moment. 1-8 are the right
	 * extents that we iterate.
	 *
	 *       |-----L-----|
	 * |-1-|-2a-|-3-|-4-|-5-|-6-|
	 *
	 *       |-----L-----|
	 * |--1--|-2b-|...(same as above)
	 *
	 * Alternative situation. Happens on files where extents got split.
	 *       |-----L-----|
	 * |-----------7-----------|-6-|
	 *
	 * Alternative situation. Happens on files which got larger.
	 *       |-----L-----|
	 * |-8-|
	 * Nothing follows after 8.
	 */

	key.objectid = ekey->objectid;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = ekey->offset;
	ret = btrfs_search_slot_for_read(sctx->parent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret) {
		ret = 0;
		goto out;
	}

	/*
	 * Handle special case where the right side has no extents at all.
	 */
	eb = path->nodes[0];
	slot = path->slots[0];
	btrfs_item_key_to_cpu(eb, &found_key, slot);
	if (found_key.objectid != key.objectid ||
	    found_key.type != key.type) {
		/* If we're a hole then just pretend nothing changed */
		ret = (left_disknr) ? 0 : 1;
		goto out;
	}

	/*
	 * We're now on 2a, 2b or 7.
	 */
	key = found_key;
	while (key.offset < ekey->offset + left_len) {
		ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		right_type = btrfs_file_extent_type(eb, ei);
		if (right_type != BTRFS_FILE_EXTENT_REG &&
		    right_type != BTRFS_FILE_EXTENT_INLINE) {
			ret = 0;
			goto out;
		}

		if (right_type == BTRFS_FILE_EXTENT_INLINE) {
			right_len = btrfs_file_extent_ram_bytes(eb, ei);
			right_len = PAGE_ALIGN(right_len);
		} else {
			right_len = btrfs_file_extent_num_bytes(eb, ei);
		}

		/*
		 * Are we at extent 8? If yes, we know the extent is changed.
		 * This may only happen on the first iteration.
		 */
		if (found_key.offset + right_len <= ekey->offset) {
			/* If we're a hole just pretend nothing changed */
			ret = (left_disknr) ? 0 : 1;
			goto out;
		}

		/*
		 * We just wanted to see if when we have an inline extent, what
		 * follows it is a regular extent (wanted to check the above
		 * condition for inline extents too). This should normally not
		 * happen but it's possible for example when we have an inline
		 * compressed extent representing data with a size matching
		 * the page size (currently the same as sector size).
		 */
		if (right_type == BTRFS_FILE_EXTENT_INLINE) {
			ret = 0;
			goto out;
		}

		right_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
		right_offset = btrfs_file_extent_offset(eb, ei);
		right_gen = btrfs_file_extent_generation(eb, ei);

		left_offset_fixed = left_offset;
		if (key.offset < ekey->offset) {
			/* Fix the right offset for 2a and 7. */
			right_offset += ekey->offset - key.offset;
		} else {
			/* Fix the left offset for all behind 2a and 2b */
			left_offset_fixed += key.offset - ekey->offset;
		}

		/*
		 * Check if we have the same extent.
		 */
		if (left_disknr != right_disknr ||
		    left_offset_fixed != right_offset ||
		    left_gen != right_gen) {
			ret = 0;
			goto out;
		}

		/*
		 * Go to the next extent.
		 */
		ret = btrfs_next_item(sctx->parent_root, path);
		if (ret < 0)
			goto out;
		if (!ret) {
			eb = path->nodes[0];
			slot = path->slots[0];
			btrfs_item_key_to_cpu(eb, &found_key, slot);
		}
		if (ret || found_key.objectid != key.objectid ||
		    found_key.type != key.type) {
			key.offset += right_len;
			break;
		}
		if (found_key.offset != key.offset + right_len) {
			ret = 0;
			goto out;
		}
		key = found_key;
	}

	/*
	 * We're now behind the left extent (treat as unchanged) or at the end
	 * of the right side (treat as changed).
	 */
	if (key.offset >= ekey->offset + left_len)
		ret = 1;
	else
		ret = 0;

out:
	btrfs_free_path(path);
	return ret;
}
static int get_last_extent(struct send_ctx *sctx, u64 offset)
{
	struct btrfs_path *path;
	struct btrfs_root *root = sctx->send_root;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 extent_end;
	u8 type;
	int ret;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	sctx->cur_inode_last_extent = 0;

	key.objectid = sctx->cur_ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = offset;
	ret = btrfs_search_slot_for_read(root, &key, path, 0, 1);
	if (ret < 0)
		goto out;
	ret = 0;
	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
	if (key.objectid != sctx->cur_ino || key.type != BTRFS_EXTENT_DATA_KEY)
		goto out;

	fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
			    struct btrfs_file_extent_item);
	type = btrfs_file_extent_type(path->nodes[0], fi);
	if (type == BTRFS_FILE_EXTENT_INLINE) {
		u64 size = btrfs_file_extent_ram_bytes(path->nodes[0], fi);
		extent_end = ALIGN(key.offset + size,
				   sctx->send_root->fs_info->sectorsize);
	} else {
		extent_end = key.offset +
			btrfs_file_extent_num_bytes(path->nodes[0], fi);
	}
	sctx->cur_inode_last_extent = extent_end;
out:
	btrfs_free_path(path);
	return ret;
}
*sctx
,
5586 struct btrfs_path
*path
;
5587 struct btrfs_key key
;
5588 struct btrfs_root
*root
= sctx
->parent_root
;
5589 u64 search_start
= start
;
5592 path
= alloc_path_for_send();
5596 key
.objectid
= sctx
->cur_ino
;
5597 key
.type
= BTRFS_EXTENT_DATA_KEY
;
5598 key
.offset
= search_start
;
5599 ret
= btrfs_search_slot(NULL
, root
, &key
, path
, 0, 0);
5602 if (ret
> 0 && path
->slots
[0] > 0)
5605 while (search_start
< end
) {
5606 struct extent_buffer
*leaf
= path
->nodes
[0];
5607 int slot
= path
->slots
[0];
5608 struct btrfs_file_extent_item
*fi
;
5611 if (slot
>= btrfs_header_nritems(leaf
)) {
5612 ret
= btrfs_next_leaf(root
, path
);
5620 btrfs_item_key_to_cpu(leaf
, &key
, slot
);
5621 if (key
.objectid
< sctx
->cur_ino
||
5622 key
.type
< BTRFS_EXTENT_DATA_KEY
)
5624 if (key
.objectid
> sctx
->cur_ino
||
5625 key
.type
> BTRFS_EXTENT_DATA_KEY
||
5629 fi
= btrfs_item_ptr(leaf
, slot
, struct btrfs_file_extent_item
);
5630 if (btrfs_file_extent_type(leaf
, fi
) ==
5631 BTRFS_FILE_EXTENT_INLINE
) {
5632 u64 size
= btrfs_file_extent_ram_bytes(leaf
, fi
);
5634 extent_end
= ALIGN(key
.offset
+ size
,
5635 root
->fs_info
->sectorsize
);
5637 extent_end
= key
.offset
+
5638 btrfs_file_extent_num_bytes(leaf
, fi
);
5640 if (extent_end
<= start
)
5642 if (btrfs_file_extent_disk_bytenr(leaf
, fi
) == 0) {
5643 search_start
= extent_end
;
5653 btrfs_free_path(path
);
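
/*
 * Called for each file extent item while processing a regular file. If
 * there is a gap between the end of the previously processed extent and
 * the offset of the current one, and the corresponding range is not
 * already a hole in the parent snapshot, emit the write commands that
 * zero out that range on the receiving side.
 */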
static int maybe_send_hole(struct send_ctx *sctx, struct btrfs_path *path,
			   struct btrfs_key *key)
{
	struct btrfs_file_extent_item *fi;
	u64 extent_end;
	u8 type;
	int ret = 0;

	if (sctx->cur_ino != key->objectid || !need_send_hole(sctx))
		return 0;

	if (sctx->cur_inode_last_extent == (u64)-1) {
		ret = get_last_extent(sctx, key->offset - 1);
		if (ret)
			return ret;
	}

	fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
			    struct btrfs_file_extent_item);
	type = btrfs_file_extent_type(path->nodes[0], fi);
	if (type == BTRFS_FILE_EXTENT_INLINE) {
		u64 size = btrfs_file_extent_ram_bytes(path->nodes[0], fi);

		extent_end = ALIGN(key->offset + size,
				   sctx->send_root->fs_info->sectorsize);
	} else {
		extent_end = key->offset +
			btrfs_file_extent_num_bytes(path->nodes[0], fi);
	}

	if (path->slots[0] == 0 &&
	    sctx->cur_inode_last_extent < key->offset) {
		/*
		 * We might have skipped entire leafs that contained only
		 * file extent items for our current inode. These leafs have
		 * a generation number smaller (older) than the one in the
		 * current leaf and the leaf our last extent came from, and
		 * are located between these 2 leafs.
		 */
		ret = get_last_extent(sctx, key->offset - 1);
		if (ret)
			return ret;
	}

	if (sctx->cur_inode_last_extent < key->offset) {
		ret = range_is_hole_in_parent(sctx,
					      sctx->cur_inode_last_extent,
					      key->offset);
		if (ret < 0)
			return ret;
		else if (ret == 0)
			ret = send_hole(sctx, key->offset);
		else
			ret = 0;
	}
	sctx->cur_inode_last_extent = extent_end;
	return ret;
}
static int process_extent(struct send_ctx *sctx,
			  struct btrfs_path *path,
			  struct btrfs_key *key)
{
	struct clone_root *found_clone = NULL;
	int ret = 0;

	if (S_ISLNK(sctx->cur_inode_mode))
		return 0;

	if (sctx->parent_root && !sctx->cur_inode_new) {
		ret = is_extent_unchanged(sctx, path, key);
		if (ret < 0)
			goto out;
		if (ret) {
			ret = 0;
			goto out_hole;
		}
	} else {
		struct btrfs_file_extent_item *ei;
		u8 type;

		ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
				    struct btrfs_file_extent_item);
		type = btrfs_file_extent_type(path->nodes[0], ei);
		if (type == BTRFS_FILE_EXTENT_PREALLOC ||
		    type == BTRFS_FILE_EXTENT_REG) {
			/*
			 * The send spec does not have a prealloc command yet,
			 * so just leave a hole for prealloc'ed extents until
			 * we have enough commands queued up to justify rev'ing
			 * the send spec.
			 */
			if (type == BTRFS_FILE_EXTENT_PREALLOC) {
				ret = 0;
				goto out;
			}

			/* Have a hole, just skip it. */
			if (btrfs_file_extent_disk_bytenr(path->nodes[0], ei) == 0) {
				ret = 0;
				goto out;
			}
		}
	}

	ret = find_extent_clone(sctx, path, key->objectid, key->offset,
			sctx->cur_inode_size, &found_clone);
	if (ret != -ENOENT && ret < 0)
		goto out;

	ret = send_write_or_clone(sctx, path, key, found_clone);
	if (ret)
		goto out;
out_hole:
	ret = maybe_send_hole(sctx, path, key);
out:
	return ret;
}
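
/*
 * Iterate over all extent items of the current inode and feed each of them
 * to process_extent(). Used when the whole inode is treated as new.
 */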
static int process_all_extents(struct send_ctx *sctx)
{
	int ret;
	struct btrfs_root *root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	int slot;

	root = sctx->send_root;
	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = sctx->cmp_key->objectid;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		eb = path->nodes[0];
		slot = path->slots[0];

		if (slot >= btrfs_header_nritems(eb)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0) {
				goto out;
			} else if (ret > 0) {
				ret = 0;
				break;
			}
			continue;
		}

		btrfs_item_key_to_cpu(eb, &found_key, slot);

		if (found_key.objectid != key.objectid ||
		    found_key.type != key.type) {
			ret = 0;
			goto out;
		}

		ret = process_extent(sctx, path, &found_key);
		if (ret < 0)
			goto out;

		path->slots[0]++;
	}

out:
	btrfs_free_path(path);
	return ret;
}
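
/*
 * Run process_recorded_refs() once all ref items of the current inode have
 * been seen. *refs_processed is set if the refs were actually processed,
 * and *pending_move is set if the current directory could not be moved or
 * renamed yet and had to be deferred.
 */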
static int process_recorded_refs_if_needed(struct send_ctx *sctx, int at_end,
					   int *pending_move,
					   int *refs_processed)
{
	int ret = 0;

	if (sctx->cur_ino == 0)
		goto out;
	if (!at_end && sctx->cur_ino == sctx->cmp_key->objectid &&
	    sctx->cmp_key->type <= BTRFS_INODE_EXTREF_KEY)
		goto out;
	if (list_empty(&sctx->new_refs) && list_empty(&sctx->deleted_refs))
		goto out;

	ret = process_recorded_refs(sctx, pending_move);
	if (ret < 0)
		goto out;

	*refs_processed = 1;
out:
	return ret;
}
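
/*
 * Finish off the currently processed inode: send a trailing hole and a
 * truncate if needed for regular files, chown/chmod if the owner or mode
 * differ from the parent snapshot, apply child directory moves that waited
 * on this inode, and finally send utimes.
 */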
static int finish_inode_if_needed(struct send_ctx *sctx, int at_end)
{
	int ret = 0;
	u64 left_mode;
	u64 left_uid;
	u64 left_gid;
	u64 right_mode;
	u64 right_uid;
	u64 right_gid;
	int need_chown = 0;
	int need_chmod = 0;
	int need_truncate = 1;
	int pending_move = 0;
	int refs_processed = 0;

	if (sctx->ignore_cur_inode)
		return 0;

	ret = process_recorded_refs_if_needed(sctx, at_end, &pending_move,
					      &refs_processed);
	if (ret < 0)
		goto out;

	/*
	 * We have processed the refs and thus need to advance send_progress.
	 * Now, calls to get_cur_xxx will take the updated refs of the current
	 * inode into account.
	 *
	 * On the other hand, if our current inode is a directory and couldn't
	 * be moved/renamed because its parent was renamed/moved too and it has
	 * a higher inode number, we can only move/rename our current inode
	 * after we moved/renamed its parent. Therefore in this case operate on
	 * the old path (pre move/rename) of our current inode, and the
	 * move/rename will be performed later.
	 */
	if (refs_processed && !pending_move)
		sctx->send_progress = sctx->cur_ino + 1;

	if (sctx->cur_ino == 0 || sctx->cur_inode_deleted)
		goto out;
	if (!at_end && sctx->cmp_key->objectid == sctx->cur_ino)
		goto out;

	ret = get_inode_info(sctx->send_root, sctx->cur_ino, NULL, NULL,
			&left_mode, &left_uid, &left_gid, NULL);
	if (ret < 0)
		goto out;

	if (!sctx->parent_root || sctx->cur_inode_new) {
		need_chown = 1;
		if (!S_ISLNK(sctx->cur_inode_mode))
			need_chmod = 1;
		if (sctx->cur_inode_next_write_offset == sctx->cur_inode_size)
			need_truncate = 0;
	} else {
		u64 old_size;

		ret = get_inode_info(sctx->parent_root, sctx->cur_ino,
				&old_size, NULL, &right_mode, &right_uid,
				&right_gid, NULL);
		if (ret < 0)
			goto out;

		if (left_uid != right_uid || left_gid != right_gid)
			need_chown = 1;
		if (!S_ISLNK(sctx->cur_inode_mode) && left_mode != right_mode)
			need_chmod = 1;
		if ((old_size == sctx->cur_inode_size) ||
		    (sctx->cur_inode_size > old_size &&
		     sctx->cur_inode_next_write_offset == sctx->cur_inode_size))
			need_truncate = 0;
	}

	if (S_ISREG(sctx->cur_inode_mode)) {
		if (need_send_hole(sctx)) {
			if (sctx->cur_inode_last_extent == (u64)-1 ||
			    sctx->cur_inode_last_extent <
			    sctx->cur_inode_size) {
				ret = get_last_extent(sctx, (u64)-1);
				if (ret)
					goto out;
			}
			if (sctx->cur_inode_last_extent <
			    sctx->cur_inode_size) {
				ret = send_hole(sctx, sctx->cur_inode_size);
				if (ret)
					goto out;
			}
		}
		if (need_truncate) {
			ret = send_truncate(sctx, sctx->cur_ino,
					    sctx->cur_inode_gen,
					    sctx->cur_inode_size);
			if (ret < 0)
				goto out;
		}
	}

	if (need_chown) {
		ret = send_chown(sctx, sctx->cur_ino, sctx->cur_inode_gen,
				left_uid, left_gid);
		if (ret < 0)
			goto out;
	}
	if (need_chmod) {
		ret = send_chmod(sctx, sctx->cur_ino, sctx->cur_inode_gen,
				left_mode);
		if (ret < 0)
			goto out;
	}

	/*
	 * If other directory inodes depended on our current directory
	 * inode's move/rename, now do their move/rename operations.
	 */
	if (!is_waiting_for_move(sctx, sctx->cur_ino)) {
		ret = apply_children_dir_moves(sctx);
		if (ret)
			goto out;
		/*
		 * Need to send that every time, no matter if it actually
		 * changed between the two trees as we have done changes to
		 * the inode before. If our inode is a directory and it's
		 * waiting to be moved/renamed, we will send its utimes when
		 * it's moved/renamed, therefore we don't need to do it here.
		 */
		sctx->send_progress = sctx->cur_ino + 1;
		ret = send_utimes(sctx, sctx->cur_ino, sctx->cur_inode_gen);
		if (ret < 0)
			goto out;
	}

out:
	return ret;
}
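
/*
 * Context and callback used with iterate_inode_ref() to collect all paths
 * that the current inode has in the parent snapshot.
 */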
struct parent_paths_ctx {
	struct list_head *refs;
	struct send_ctx *sctx;
};

static int record_parent_ref(int num, u64 dir, int index, struct fs_path *name,
			     void *ctx)
{
	struct parent_paths_ctx *ppctx = ctx;

	return record_ref(ppctx->sctx->parent_root, dir, name, ppctx->sctx,
			  ppctx->refs);
}
/*
 * Issue unlink operations for all paths of the current inode found in the
 * parent snapshot.
 */
static int btrfs_unlink_all_paths(struct send_ctx *sctx)
{
	LIST_HEAD(deleted_refs);
	struct btrfs_path *path;
	struct btrfs_key key;
	struct parent_paths_ctx ctx;
	int ret;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = sctx->cur_ino;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, sctx->parent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	ctx.refs = &deleted_refs;
	ctx.sctx = sctx;

	while (true) {
		struct extent_buffer *eb = path->nodes[0];
		int slot = path->slots[0];

		if (slot >= btrfs_header_nritems(eb)) {
			ret = btrfs_next_leaf(sctx->parent_root, path);
			if (ret < 0)
				goto out;
			else if (ret > 0)
				break;
			continue;
		}

		btrfs_item_key_to_cpu(eb, &key, slot);
		if (key.objectid != sctx->cur_ino)
			break;
		if (key.type != BTRFS_INODE_REF_KEY &&
		    key.type != BTRFS_INODE_EXTREF_KEY)
			break;

		ret = iterate_inode_ref(sctx->parent_root, path, &key, 1,
					record_parent_ref, &ctx);
		if (ret < 0)
			goto out;

		path->slots[0]++;
	}

	while (!list_empty(&deleted_refs)) {
		struct recorded_ref *ref;

		ref = list_first_entry(&deleted_refs, struct recorded_ref, list);
		ret = send_unlink(sctx, ref->full_path);
		if (ret < 0)
			goto out;
		fs_path_free(ref->full_path);
		list_del(&ref->list);
		kfree(ref);
	}
	ret = 0;
out:
	btrfs_free_path(path);
	if (ret)
		__free_recorded_refs(&deleted_refs);
	return ret;
}
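
/*
 * Set up the per-inode state (cur_ino, generation, size, mode, ...) for an
 * inode item reported by the tree comparison. An inode whose generation
 * changed is treated as deleted plus newly created, and inodes with a link
 * count of zero are ignored (after unlinking their old paths).
 */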
static int changed_inode(struct send_ctx *sctx,
			 enum btrfs_compare_tree_result result)
{
	int ret = 0;
	struct btrfs_key *key = sctx->cmp_key;
	struct btrfs_inode_item *left_ii = NULL;
	struct btrfs_inode_item *right_ii = NULL;
	u64 left_gen = 0;
	u64 right_gen = 0;

	sctx->cur_ino = key->objectid;
	sctx->cur_inode_new_gen = 0;
	sctx->cur_inode_last_extent = (u64)-1;
	sctx->cur_inode_next_write_offset = 0;
	sctx->ignore_cur_inode = false;

	/*
	 * Set send_progress to current inode. This will tell all get_cur_xxx
	 * functions that the current inode's refs are not updated yet. Later,
	 * when process_recorded_refs is finished, it is set to cur_ino + 1.
	 */
	sctx->send_progress = sctx->cur_ino;

	if (result == BTRFS_COMPARE_TREE_NEW ||
	    result == BTRFS_COMPARE_TREE_CHANGED) {
		left_ii = btrfs_item_ptr(sctx->left_path->nodes[0],
				sctx->left_path->slots[0],
				struct btrfs_inode_item);
		left_gen = btrfs_inode_generation(sctx->left_path->nodes[0],
				left_ii);
	} else {
		right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
				sctx->right_path->slots[0],
				struct btrfs_inode_item);
		right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
				right_ii);
	}
	if (result == BTRFS_COMPARE_TREE_CHANGED) {
		right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
				sctx->right_path->slots[0],
				struct btrfs_inode_item);

		right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
				right_ii);

		/*
		 * The cur_ino = root dir case is special here. We can't treat
		 * the inode as deleted+reused because it would generate a
		 * stream that tries to delete/mkdir the root dir.
		 */
		if (left_gen != right_gen &&
		    sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
			sctx->cur_inode_new_gen = 1;
	}

	/*
	 * Normally we do not find inodes with a link count of zero (orphans)
	 * because the most common case is to create a snapshot and use it
	 * for a send operation. However other less common use cases involve
	 * using a subvolume and send it after turning it to RO mode just
	 * after deleting all hard links of a file while holding an open
	 * file descriptor against it or turning a RO snapshot into RW mode,
	 * keep an open file descriptor against a file, delete it and then
	 * turn the snapshot back to RO mode before using it for a send
	 * operation. So if we find such cases, ignore the inode and all its
	 * items completely if it's a new inode, or if it's a changed inode
	 * make sure all its previous paths (from the parent snapshot) are all
	 * unlinked and all other inode items are ignored.
	 */
	if (result == BTRFS_COMPARE_TREE_NEW ||
	    result == BTRFS_COMPARE_TREE_CHANGED) {
		u32 nlinks;

		nlinks = btrfs_inode_nlink(sctx->left_path->nodes[0], left_ii);
		if (nlinks == 0) {
			sctx->ignore_cur_inode = true;
			if (result == BTRFS_COMPARE_TREE_CHANGED)
				ret = btrfs_unlink_all_paths(sctx);
			goto out;
		}
	}

	if (result == BTRFS_COMPARE_TREE_NEW) {
		sctx->cur_inode_gen = left_gen;
		sctx->cur_inode_new = 1;
		sctx->cur_inode_deleted = 0;
		sctx->cur_inode_size = btrfs_inode_size(
				sctx->left_path->nodes[0], left_ii);
		sctx->cur_inode_mode = btrfs_inode_mode(
				sctx->left_path->nodes[0], left_ii);
		sctx->cur_inode_rdev = btrfs_inode_rdev(
				sctx->left_path->nodes[0], left_ii);
		if (sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
			ret = send_create_inode_if_needed(sctx);
	} else if (result == BTRFS_COMPARE_TREE_DELETED) {
		sctx->cur_inode_gen = right_gen;
		sctx->cur_inode_new = 0;
		sctx->cur_inode_deleted = 1;
		sctx->cur_inode_size = btrfs_inode_size(
				sctx->right_path->nodes[0], right_ii);
		sctx->cur_inode_mode = btrfs_inode_mode(
				sctx->right_path->nodes[0], right_ii);
	} else if (result == BTRFS_COMPARE_TREE_CHANGED) {
		/*
		 * We need to do some special handling in case the inode was
		 * reported as changed with a changed generation number. This
		 * means that the original inode was deleted and new inode
		 * reused the same inum. So we have to treat the old inode as
		 * deleted and the new one as new.
		 */
		if (sctx->cur_inode_new_gen) {
			/*
			 * First, process the inode as if it was deleted.
			 */
			sctx->cur_inode_gen = right_gen;
			sctx->cur_inode_new = 0;
			sctx->cur_inode_deleted = 1;
			sctx->cur_inode_size = btrfs_inode_size(
					sctx->right_path->nodes[0], right_ii);
			sctx->cur_inode_mode = btrfs_inode_mode(
					sctx->right_path->nodes[0], right_ii);
			ret = process_all_refs(sctx,
					BTRFS_COMPARE_TREE_DELETED);
			if (ret < 0)
				goto out;

			/*
			 * Now process the inode as if it was new.
			 */
			sctx->cur_inode_gen = left_gen;
			sctx->cur_inode_new = 1;
			sctx->cur_inode_deleted = 0;
			sctx->cur_inode_size = btrfs_inode_size(
					sctx->left_path->nodes[0], left_ii);
			sctx->cur_inode_mode = btrfs_inode_mode(
					sctx->left_path->nodes[0], left_ii);
			sctx->cur_inode_rdev = btrfs_inode_rdev(
					sctx->left_path->nodes[0], left_ii);
			ret = send_create_inode_if_needed(sctx);
			if (ret < 0)
				goto out;

			ret = process_all_refs(sctx, BTRFS_COMPARE_TREE_NEW);
			if (ret < 0)
				goto out;
			/*
			 * Advance send_progress now as we did not get into
			 * process_recorded_refs_if_needed in the new_gen case.
			 */
			sctx->send_progress = sctx->cur_ino + 1;

			/*
			 * Now process all extents and xattrs of the inode as if
			 * they were all new.
			 */
			ret = process_all_extents(sctx);
			if (ret < 0)
				goto out;
			ret = process_all_new_xattrs(sctx);
			if (ret < 0)
				goto out;
		} else {
			sctx->cur_inode_gen = left_gen;
			sctx->cur_inode_new = 0;
			sctx->cur_inode_new_gen = 0;
			sctx->cur_inode_deleted = 0;
			sctx->cur_inode_size = btrfs_inode_size(
					sctx->left_path->nodes[0], left_ii);
			sctx->cur_inode_mode = btrfs_inode_mode(
					sctx->left_path->nodes[0], left_ii);
		}
	}

out:
	return ret;
}
/*
 * We have to process new refs before deleted refs, but compare_trees gives us
 * the new and deleted refs mixed. To fix this, we record the new/deleted refs
 * first and later process them in process_recorded_refs.
 * For the cur_inode_new_gen case, we skip recording completely because
 * changed_inode already initiated processing of refs. The reason for this is
 * that in this case, compare_tree actually compares the refs of 2 different
 * inodes. To fix this, process_all_refs is used in changed_inode to handle
 * all refs of the right tree as deleted and all refs of the left tree as new.
 */
static int changed_ref(struct send_ctx *sctx,
		       enum btrfs_compare_tree_result result)
{
	int ret = 0;

	if (sctx->cur_ino != sctx->cmp_key->objectid) {
		inconsistent_snapshot_error(sctx, result, "reference");
		return -EIO;
	}

	if (!sctx->cur_inode_new_gen &&
	    sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) {
		if (result == BTRFS_COMPARE_TREE_NEW)
			ret = record_new_ref(sctx);
		else if (result == BTRFS_COMPARE_TREE_DELETED)
			ret = record_deleted_ref(sctx);
		else if (result == BTRFS_COMPARE_TREE_CHANGED)
			ret = record_changed_ref(sctx);
	}

	return ret;
}
/*
 * Process new/deleted/changed xattrs. We skip processing in the
 * cur_inode_new_gen case because changed_inode already initiated processing
 * of xattrs. The reason is the same as in changed_ref.
 */
static int changed_xattr(struct send_ctx *sctx,
			 enum btrfs_compare_tree_result result)
{
	int ret = 0;

	if (sctx->cur_ino != sctx->cmp_key->objectid) {
		inconsistent_snapshot_error(sctx, result, "xattr");
		return -EIO;
	}

	if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
		if (result == BTRFS_COMPARE_TREE_NEW)
			ret = process_new_xattr(sctx);
		else if (result == BTRFS_COMPARE_TREE_DELETED)
			ret = process_deleted_xattr(sctx);
		else if (result == BTRFS_COMPARE_TREE_CHANGED)
			ret = process_changed_xattr(sctx);
	}

	return ret;
}
/*
 * Process new/deleted/changed extents. We skip processing in the
 * cur_inode_new_gen case because changed_inode already initiated processing
 * of extents. The reason is the same as in changed_ref.
 */
static int changed_extent(struct send_ctx *sctx,
			  enum btrfs_compare_tree_result result)
{
	int ret = 0;

	if (sctx->cur_ino != sctx->cmp_key->objectid) {

		if (result == BTRFS_COMPARE_TREE_CHANGED) {
			struct extent_buffer *leaf_l;
			struct extent_buffer *leaf_r;
			struct btrfs_file_extent_item *ei_l;
			struct btrfs_file_extent_item *ei_r;

			leaf_l = sctx->left_path->nodes[0];
			leaf_r = sctx->right_path->nodes[0];
			ei_l = btrfs_item_ptr(leaf_l,
					      sctx->left_path->slots[0],
					      struct btrfs_file_extent_item);
			ei_r = btrfs_item_ptr(leaf_r,
					      sctx->right_path->slots[0],
					      struct btrfs_file_extent_item);

			/*
			 * We may have found an extent item that has changed
			 * only its disk_bytenr field and the corresponding
			 * inode item was not updated. This case happens due to
			 * very specific timings during relocation when a leaf
			 * that contains file extent items is COWed while
			 * relocation is ongoing and it's in the stage where it
			 * updates data pointers. So when this happens we can
			 * safely ignore it since we know it's the same extent,
			 * but just at different logical and physical locations
			 * (when an extent is fully replaced with a new one, we
			 * know the generation number must have changed too,
			 * since snapshot creation implies committing the current
			 * transaction, and the inode item must have been updated
			 * too).
			 * This replacement of the disk_bytenr happens at
			 * relocation.c:replace_file_extents() through
			 * relocation.c:btrfs_reloc_cow_block().
			 */
			if (btrfs_file_extent_generation(leaf_l, ei_l) ==
			    btrfs_file_extent_generation(leaf_r, ei_r) &&
			    btrfs_file_extent_ram_bytes(leaf_l, ei_l) ==
			    btrfs_file_extent_ram_bytes(leaf_r, ei_r) &&
			    btrfs_file_extent_compression(leaf_l, ei_l) ==
			    btrfs_file_extent_compression(leaf_r, ei_r) &&
			    btrfs_file_extent_encryption(leaf_l, ei_l) ==
			    btrfs_file_extent_encryption(leaf_r, ei_r) &&
			    btrfs_file_extent_other_encoding(leaf_l, ei_l) ==
			    btrfs_file_extent_other_encoding(leaf_r, ei_r) &&
			    btrfs_file_extent_type(leaf_l, ei_l) ==
			    btrfs_file_extent_type(leaf_r, ei_r) &&
			    btrfs_file_extent_disk_bytenr(leaf_l, ei_l) !=
			    btrfs_file_extent_disk_bytenr(leaf_r, ei_r) &&
			    btrfs_file_extent_disk_num_bytes(leaf_l, ei_l) ==
			    btrfs_file_extent_disk_num_bytes(leaf_r, ei_r) &&
			    btrfs_file_extent_offset(leaf_l, ei_l) ==
			    btrfs_file_extent_offset(leaf_r, ei_r) &&
			    btrfs_file_extent_num_bytes(leaf_l, ei_l) ==
			    btrfs_file_extent_num_bytes(leaf_r, ei_r))
				return 0;
		}

		inconsistent_snapshot_error(sctx, result, "extent");
		return -EIO;
	}

	if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
		if (result != BTRFS_COMPARE_TREE_DELETED)
			ret = process_extent(sctx, sctx->left_path,
					sctx->cmp_key);
	}

	return ret;
}
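
/*
 * Return 1 if directory @dir does not have the same generation in the send
 * root and in the parent root (i.e. the inode number was reused), 0 if it
 * does, and a negative errno on lookup failure.
 */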
static int dir_changed(struct send_ctx *sctx, u64 dir)
{
	u64 orig_gen, new_gen;
	int ret;

	ret = get_inode_info(sctx->send_root, dir, NULL, &new_gen, NULL, NULL,
			     NULL, NULL);
	if (ret)
		return ret;

	ret = get_inode_info(sctx->parent_root, dir, NULL, &orig_gen, NULL,
			     NULL, NULL, NULL);
	if (ret)
		return ret;

	return (orig_gen != new_gen) ? 1 : 0;
}
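
/*
 * For a ref item that compare_trees reported as unchanged, check whether
 * any parent directory it refers to was deleted and recreated. INODE_REF
 * keys carry a single parent in their offset, while an INODE_EXTREF item
 * can pack several parents, so those are walked one by one.
 */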
static int compare_refs(struct send_ctx *sctx, struct btrfs_path *path,
			struct btrfs_key *key)
{
	struct btrfs_inode_extref *extref;
	struct extent_buffer *leaf;
	u64 dirid = 0, last_dirid = 0;
	unsigned long ptr;
	u32 item_size;
	u32 cur_offset = 0;
	int ref_name_len;
	int ret = 0;

	/* Easy case, just check this one dirid */
	if (key->type == BTRFS_INODE_REF_KEY) {
		dirid = key->offset;

		ret = dir_changed(sctx, dirid);
		goto out;
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
	while (cur_offset < item_size) {
		extref = (struct btrfs_inode_extref *)(ptr +
						       cur_offset);
		dirid = btrfs_inode_extref_parent(leaf, extref);
		ref_name_len = btrfs_inode_extref_name_len(leaf, extref);
		cur_offset += ref_name_len + sizeof(*extref);
		if (dirid == last_dirid)
			continue;
		ret = dir_changed(sctx, dirid);
		if (ret)
			break;
		last_dirid = dirid;
	}
out:
	return ret;
}
/*
 * Updates compare related fields in sctx and simply forwards to the actual
 * changed_xxx functions.
 */
static int changed_cb(struct btrfs_path *left_path,
		      struct btrfs_path *right_path,
		      struct btrfs_key *key,
		      enum btrfs_compare_tree_result result,
		      void *ctx)
{
	int ret = 0;
	struct send_ctx *sctx = ctx;

	if (result == BTRFS_COMPARE_TREE_SAME) {
		if (key->type == BTRFS_INODE_REF_KEY ||
		    key->type == BTRFS_INODE_EXTREF_KEY) {
			ret = compare_refs(sctx, left_path, key);
			if (!ret)
				return 0;
			if (ret < 0)
				return ret;
		} else if (key->type == BTRFS_EXTENT_DATA_KEY) {
			return maybe_send_hole(sctx, left_path, key);
		} else {
			return 0;
		}
		result = BTRFS_COMPARE_TREE_CHANGED;
		ret = 0;
	}

	sctx->left_path = left_path;
	sctx->right_path = right_path;
	sctx->cmp_key = key;

	ret = finish_inode_if_needed(sctx, 0);
	if (ret < 0)
		goto out;

	/* Ignore non-FS objects */
	if (key->objectid == BTRFS_FREE_INO_OBJECTID ||
	    key->objectid == BTRFS_FREE_SPACE_OBJECTID)
		goto out;

	if (key->type == BTRFS_INODE_ITEM_KEY) {
		ret = changed_inode(sctx, result);
	} else if (!sctx->ignore_cur_inode) {
		if (key->type == BTRFS_INODE_REF_KEY ||
		    key->type == BTRFS_INODE_EXTREF_KEY)
			ret = changed_ref(sctx, result);
		else if (key->type == BTRFS_XATTR_ITEM_KEY)
			ret = changed_xattr(sctx, result);
		else if (key->type == BTRFS_EXTENT_DATA_KEY)
			ret = changed_extent(sctx, result);
	}

out:
	return ret;
}
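
/*
 * Full send: walk all items of the send root in key order and hand each of
 * them to changed_cb() as BTRFS_COMPARE_TREE_NEW, as if the tree had been
 * compared against an empty parent.
 */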
static int full_send_tree(struct send_ctx *sctx)
{
	int ret;
	struct btrfs_root *send_root = sctx->send_root;
	struct btrfs_key key;
	struct btrfs_path *path;
	struct extent_buffer *eb;
	int slot;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_FIRST_FREE_OBJECTID;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot_for_read(send_root, &key, path, 1, 0);
	if (ret < 0)
		goto out;
	if (ret)
		goto out_finish;

	while (1) {
		eb = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(eb, &key, slot);

		ret = changed_cb(path, NULL, &key,
				 BTRFS_COMPARE_TREE_NEW, sctx);
		if (ret < 0)
			goto out;

		ret = btrfs_next_item(send_root, path);
		if (ret < 0)
			goto out;
		if (ret) {
			ret  = 0;
			break;
		}
	}

out_finish:
	ret = finish_inode_if_needed(sctx, 1);

out:
	btrfs_free_path(path);
	return ret;
}
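
/*
 * Emit the stream header and the subvolume begin command, then either
 * compare against the parent root (incremental send) or send the full
 * tree, finishing the last processed inode at the end.
 */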
static int send_subvol(struct send_ctx *sctx)
{
	int ret;

	if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_STREAM_HEADER)) {
		ret = send_header(sctx);
		if (ret < 0)
			goto out;
	}

	ret = send_subvol_begin(sctx);
	if (ret < 0)
		goto out;

	if (sctx->parent_root) {
		ret = btrfs_compare_trees(sctx->send_root, sctx->parent_root,
				changed_cb, sctx);
		if (ret < 0)
			goto out;
		ret = finish_inode_if_needed(sctx, 1);
		if (ret < 0)
			goto out;
	} else {
		ret = full_send_tree(sctx);
		if (ret < 0)
			goto out;
	}

out:
	free_recorded_refs(sctx);
	return ret;
}
/*
 * If orphan cleanup did remove any orphans from a root, it means the tree
 * was modified and therefore the commit root is not the same as the current
 * root anymore. This is a problem, because send uses the commit root and
 * therefore can see inode items that don't exist in the current root anymore,
 * and for example make calls to btrfs_iget, which will do tree lookups based
 * on the current root and not on the commit root. Those lookups will fail,
 * returning a -ESTALE error, and making send fail with that error. So make
 * sure a send does not see any orphans we have just removed, and that it will
 * see the same inodes regardless of whether a transaction commit happened
 * before it started (meaning that the commit root will be the same as the
 * current root) or not.
 */
static int ensure_commit_roots_uptodate(struct send_ctx *sctx)
{
	int i;
	struct btrfs_trans_handle *trans = NULL;

again:
	if (sctx->parent_root &&
	    sctx->parent_root->node != sctx->parent_root->commit_root)
		goto commit_trans;

	for (i = 0; i < sctx->clone_roots_cnt; i++)
		if (sctx->clone_roots[i].root->node !=
		    sctx->clone_roots[i].root->commit_root)
			goto commit_trans;

	if (trans)
		return btrfs_end_transaction(trans);

	return 0;

commit_trans:
	/* Use any root, all fs roots will get their commit roots updated. */
	if (!trans) {
		trans = btrfs_join_transaction(sctx->send_root);
		if (IS_ERR(trans))
			return PTR_ERR(trans);
		goto again;
	}

	return btrfs_commit_transaction(trans);
}
/*
 * Make sure any existing delalloc is flushed for any root used by a send
 * operation so that we do not miss any data and we do not race with writeback
 * finishing and changing a tree while send is using the tree. This could
 * happen if a subvolume is in RW mode, has delalloc, is turned to RO mode and
 * a send operation then uses the subvolume.
 * After flushing delalloc ensure_commit_roots_uptodate() must be called.
 */
static int flush_delalloc_roots(struct send_ctx *sctx)
{
	struct btrfs_root *root = sctx->parent_root;
	int ret;
	int i;

	if (root) {
		ret = btrfs_start_delalloc_snapshot(root);
		if (ret)
			return ret;
		btrfs_wait_ordered_extents(root, U64_MAX, 0, U64_MAX);
	}

	for (i = 0; i < sctx->clone_roots_cnt; i++) {
		root = sctx->clone_roots[i].root;
		ret = btrfs_start_delalloc_snapshot(root);
		if (ret)
			return ret;
		btrfs_wait_ordered_extents(root, U64_MAX, 0, U64_MAX);
	}

	return 0;
}
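
/*
 * Drop the send_in_progress reference that was taken on a root when the
 * send ioctl started using it.
 */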
static void btrfs_root_dec_send_in_progress(struct btrfs_root *root)
{
	spin_lock(&root->root_item_lock);
	root->send_in_progress--;
	/*
	 * Not much left to do, we don't know why it's unbalanced and
	 * can't blindly reset it to 0.
	 */
	if (root->send_in_progress < 0)
		btrfs_err(root->fs_info,
			  "send_in_progress unbalanced %d root %llu",
			  root->send_in_progress, root->root_key.objectid);
	spin_unlock(&root->root_item_lock);
}
static void dedupe_in_progress_warn(const struct btrfs_root *root)
{
	btrfs_warn_rl(root->fs_info,
"cannot use root %llu for send while deduplications on it are in progress (%d in progress)",
		      root->root_key.objectid, root->dedupe_in_progress);
}
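
/*
 * Entry point of the send ioctl: validate the arguments, build the send
 * context with the clone sources and the optional parent root (all of
 * which must be and remain read-only), and stream the subvolume to the
 * file descriptor provided by user space.
 */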
long btrfs_ioctl_send(struct file *mnt_file, struct btrfs_ioctl_send_args *arg)
{
	int ret = 0;
	struct btrfs_root *send_root = BTRFS_I(file_inode(mnt_file))->root;
	struct btrfs_fs_info *fs_info = send_root->fs_info;
	struct btrfs_root *clone_root;
	struct btrfs_key key;
	struct send_ctx *sctx = NULL;
	u32 i;
	u64 *clone_sources_tmp = NULL;
	int clone_sources_to_rollback = 0;
	unsigned alloc_size;
	int sort_clone_roots = 0;
	int index;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/*
	 * The subvolume must remain read-only during send, protect against
	 * making it RW. This also protects against deletion.
	 */
	spin_lock(&send_root->root_item_lock);
	if (btrfs_root_readonly(send_root) && send_root->dedupe_in_progress) {
		dedupe_in_progress_warn(send_root);
		spin_unlock(&send_root->root_item_lock);
		return -EAGAIN;
	}
	send_root->send_in_progress++;
	spin_unlock(&send_root->root_item_lock);

	/*
	 * This is done when we lookup the root, it should already be complete
	 * by the time we get here.
	 */
	WARN_ON(send_root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE);

	/*
	 * Userspace tools do the checks and warn the user if it's
	 * not RO.
	 */
	if (!btrfs_root_readonly(send_root)) {
		ret = -EPERM;
		goto out;
	}

	/*
	 * Check that we don't overflow at later allocations, we request
	 * clone_sources_count + 1 items, and compare to unsigned long inside
	 * access_ok.
	 */
	if (arg->clone_sources_count >
	    ULONG_MAX / sizeof(struct clone_root) - 1) {
		ret = -EINVAL;
		goto out;
	}

	if (!access_ok(arg->clone_sources,
			sizeof(*arg->clone_sources) *
			arg->clone_sources_count)) {
		ret = -EFAULT;
		goto out;
	}

	if (arg->flags & ~BTRFS_SEND_FLAG_MASK) {
		ret = -EINVAL;
		goto out;
	}

	sctx = kzalloc(sizeof(struct send_ctx), GFP_KERNEL);
	if (!sctx) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&sctx->new_refs);
	INIT_LIST_HEAD(&sctx->deleted_refs);
	INIT_RADIX_TREE(&sctx->name_cache, GFP_KERNEL);
	INIT_LIST_HEAD(&sctx->name_cache_list);

	sctx->flags = arg->flags;

	sctx->send_filp = fget(arg->send_fd);
	if (!sctx->send_filp) {
		ret = -EBADF;
		goto out;
	}

	sctx->send_root = send_root;
	/*
	 * Unlikely but possible, if the subvolume is marked for deletion but
	 * is slow to remove the directory entry, send can still be started.
	 */
	if (btrfs_root_dead(sctx->send_root)) {
		ret = -EPERM;
		goto out;
	}

	sctx->clone_roots_cnt = arg->clone_sources_count;

	sctx->send_max_size = BTRFS_SEND_BUF_SIZE;
	sctx->send_buf = kvmalloc(sctx->send_max_size, GFP_KERNEL);
	if (!sctx->send_buf) {
		ret = -ENOMEM;
		goto out;
	}

	sctx->read_buf = kvmalloc(BTRFS_SEND_READ_SIZE, GFP_KERNEL);
	if (!sctx->read_buf) {
		ret = -ENOMEM;
		goto out;
	}

	sctx->pending_dir_moves = RB_ROOT;
	sctx->waiting_dir_moves = RB_ROOT;
	sctx->orphan_dirs = RB_ROOT;

	alloc_size = sizeof(struct clone_root) * (arg->clone_sources_count + 1);

	sctx->clone_roots = kzalloc(alloc_size, GFP_KERNEL);
	if (!sctx->clone_roots) {
		ret = -ENOMEM;
		goto out;
	}

	alloc_size = arg->clone_sources_count * sizeof(*arg->clone_sources);

	if (arg->clone_sources_count) {
		clone_sources_tmp = kvmalloc(alloc_size, GFP_KERNEL);
		if (!clone_sources_tmp) {
			ret = -ENOMEM;
			goto out;
		}

		ret = copy_from_user(clone_sources_tmp, arg->clone_sources,
				alloc_size);
		if (ret) {
			ret = -EFAULT;
			goto out;
		}

		for (i = 0; i < arg->clone_sources_count; i++) {
			key.objectid = clone_sources_tmp[i];
			key.type = BTRFS_ROOT_ITEM_KEY;
			key.offset = (u64)-1;

			index = srcu_read_lock(&fs_info->subvol_srcu);

			clone_root = btrfs_read_fs_root_no_name(fs_info, &key);
			if (IS_ERR(clone_root)) {
				srcu_read_unlock(&fs_info->subvol_srcu, index);
				ret = PTR_ERR(clone_root);
				goto out;
			}
			spin_lock(&clone_root->root_item_lock);
			if (!btrfs_root_readonly(clone_root) ||
			    btrfs_root_dead(clone_root)) {
				spin_unlock(&clone_root->root_item_lock);
				srcu_read_unlock(&fs_info->subvol_srcu, index);
				ret = -EPERM;
				goto out;
			}
			if (clone_root->dedupe_in_progress) {
				dedupe_in_progress_warn(clone_root);
				spin_unlock(&clone_root->root_item_lock);
				srcu_read_unlock(&fs_info->subvol_srcu, index);
				ret = -EAGAIN;
				goto out;
			}
			clone_root->send_in_progress++;
			spin_unlock(&clone_root->root_item_lock);
			srcu_read_unlock(&fs_info->subvol_srcu, index);

			sctx->clone_roots[i].root = clone_root;
			clone_sources_to_rollback = i + 1;
		}
		kvfree(clone_sources_tmp);
		clone_sources_tmp = NULL;
	}

	if (arg->parent_root) {
		key.objectid = arg->parent_root;
		key.type = BTRFS_ROOT_ITEM_KEY;
		key.offset = (u64)-1;

		index = srcu_read_lock(&fs_info->subvol_srcu);

		sctx->parent_root = btrfs_read_fs_root_no_name(fs_info, &key);
		if (IS_ERR(sctx->parent_root)) {
			srcu_read_unlock(&fs_info->subvol_srcu, index);
			ret = PTR_ERR(sctx->parent_root);
			goto out;
		}

		spin_lock(&sctx->parent_root->root_item_lock);
		sctx->parent_root->send_in_progress++;
		if (!btrfs_root_readonly(sctx->parent_root) ||
		    btrfs_root_dead(sctx->parent_root)) {
			spin_unlock(&sctx->parent_root->root_item_lock);
			srcu_read_unlock(&fs_info->subvol_srcu, index);
			ret = -EPERM;
			goto out;
		}
		if (sctx->parent_root->dedupe_in_progress) {
			dedupe_in_progress_warn(sctx->parent_root);
			spin_unlock(&sctx->parent_root->root_item_lock);
			srcu_read_unlock(&fs_info->subvol_srcu, index);
			ret = -EAGAIN;
			goto out;
		}
		spin_unlock(&sctx->parent_root->root_item_lock);

		srcu_read_unlock(&fs_info->subvol_srcu, index);
	}

	/*
	 * Clones from send_root are allowed, but only if the clone source
	 * is behind the current send position. This is checked while searching
	 * for possible clone sources.
	 */
	sctx->clone_roots[sctx->clone_roots_cnt++].root = sctx->send_root;

	/* We do a bsearch later */
	sort(sctx->clone_roots, sctx->clone_roots_cnt,
			sizeof(*sctx->clone_roots), __clone_root_cmp_sort,
			NULL);
	sort_clone_roots = 1;

	ret = flush_delalloc_roots(sctx);
	if (ret)
		goto out;

	ret = ensure_commit_roots_uptodate(sctx);
	if (ret)
		goto out;

	mutex_lock(&fs_info->balance_mutex);
	if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
		mutex_unlock(&fs_info->balance_mutex);
		btrfs_warn_rl(fs_info,
		"cannot run send because a balance operation is in progress");
		ret = -EAGAIN;
		goto out;
	}
	fs_info->send_in_progress++;
	mutex_unlock(&fs_info->balance_mutex);

	current->journal_info = BTRFS_SEND_TRANS_STUB;
	ret = send_subvol(sctx);
	current->journal_info = NULL;
	mutex_lock(&fs_info->balance_mutex);
	fs_info->send_in_progress--;
	mutex_unlock(&fs_info->balance_mutex);
	if (ret < 0)
		goto out;

	if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_END_CMD)) {
		ret = begin_cmd(sctx, BTRFS_SEND_C_END);
		if (ret < 0)
			goto out;
		ret = send_cmd(sctx);
		if (ret < 0)
			goto out;
	}

out:
	WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->pending_dir_moves));
	while (sctx && !RB_EMPTY_ROOT(&sctx->pending_dir_moves)) {
		struct rb_node *n;
		struct pending_dir_move *pm;

		n = rb_first(&sctx->pending_dir_moves);
		pm = rb_entry(n, struct pending_dir_move, node);
		while (!list_empty(&pm->list)) {
			struct pending_dir_move *pm2;

			pm2 = list_first_entry(&pm->list,
					       struct pending_dir_move, list);
			free_pending_move(sctx, pm2);
		}
		free_pending_move(sctx, pm);
	}

	WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves));
	while (sctx && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves)) {
		struct rb_node *n;
		struct waiting_dir_move *dm;

		n = rb_first(&sctx->waiting_dir_moves);
		dm = rb_entry(n, struct waiting_dir_move, node);
		rb_erase(&dm->node, &sctx->waiting_dir_moves);
		kfree(dm);
	}

	WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->orphan_dirs));
	while (sctx && !RB_EMPTY_ROOT(&sctx->orphan_dirs)) {
		struct rb_node *n;
		struct orphan_dir_info *odi;

		n = rb_first(&sctx->orphan_dirs);
		odi = rb_entry(n, struct orphan_dir_info, node);
		free_orphan_dir_info(sctx, odi);
	}

	if (sort_clone_roots) {
		for (i = 0; i < sctx->clone_roots_cnt; i++)
			btrfs_root_dec_send_in_progress(
					sctx->clone_roots[i].root);
	} else {
		for (i = 0; sctx && i < clone_sources_to_rollback; i++)
			btrfs_root_dec_send_in_progress(
					sctx->clone_roots[i].root);

		btrfs_root_dec_send_in_progress(send_root);
	}
	if (sctx && !IS_ERR_OR_NULL(sctx->parent_root))
		btrfs_root_dec_send_in_progress(sctx->parent_root);

	kvfree(clone_sources_tmp);

	if (sctx) {
		if (sctx->send_filp)
			fput(sctx->send_filp);

		kvfree(sctx->clone_roots);
		kvfree(sctx->send_buf);
		kvfree(sctx->read_buf);

		name_cache_free(sctx