// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 Alexander Block.  All rights reserved.
 */

#include <linux/bsearch.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/sort.h>
#include <linux/mount.h>
#include <linux/xattr.h>
#include <linux/posix_acl_xattr.h>
#include <linux/radix-tree.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/compat.h>
#include <linux/crc32c.h>

#include "send.h"
#include "backref.h"
#include "ctree.h"
#include "disk-io.h"
#include "btrfs_inode.h"
#include "transaction.h"
#include "compression.h"
/*
 * A fs_path is a helper to dynamically build path names with unknown size.
 * It reallocates the internal buffer on demand.
 * It allows fast adding of path elements on the right side (normal path) and
 * fast adding to the left side (reversed path). A reversed path can also be
 * unreversed if needed.
 */
struct fs_path {
	union {
		struct {
			char *start;
			char *end;

			char *buf;
			unsigned short buf_len:15;
			unsigned short reversed:1;
			char inline_buf[];
		};
		/*
		 * Average path length does not exceed 200 bytes, we'll have
		 * better packing in the slab and higher chance to satisfy
		 * an allocation later during send.
		 */
		char pad[256];
	};
};
#define FS_PATH_INLINE_SIZE \
	(sizeof(struct fs_path) - offsetof(struct fs_path, inline_buf))
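
/*
 * Layout note (illustrative, not from the original file): on 64 bit kernels
 * the three pointers plus the two bitfields occupy 26 bytes, so
 * FS_PATH_INLINE_SIZE leaves about 230 of the 256 padded bytes for
 * inline_buf before a heap allocation becomes necessary.
 */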

/* reused for each extent */
struct clone_root {
	struct btrfs_root *root;
	u64 ino;
	u64 offset;

	u64 found_refs;
};

#define SEND_CTX_MAX_NAME_CACHE_SIZE 128
#define SEND_CTX_NAME_CACHE_CLEAN_SIZE (SEND_CTX_MAX_NAME_CACHE_SIZE * 2)

struct send_ctx {
	struct file *send_filp;
	loff_t send_off;
	char *send_buf;
	u32 send_size;
	u32 send_max_size;
	u64 total_send_size;
	u64 cmd_send_size[BTRFS_SEND_C_MAX + 1];
	u64 flags;	/* 'flags' member of btrfs_ioctl_send_args is u64 */

	struct btrfs_root *send_root;
	struct btrfs_root *parent_root;
	struct clone_root *clone_roots;
	int clone_roots_cnt;

	/* current state of the compare_tree call */
	struct btrfs_path *left_path;
	struct btrfs_path *right_path;
	struct btrfs_key *cmp_key;

	/*
	 * info of the currently processed inode. In case of deleted inodes,
	 * these are the values from the deleted inode.
	 */
	u64 cur_ino;
	u64 cur_inode_gen;
	int cur_inode_new;
	int cur_inode_new_gen;
	int cur_inode_deleted;
	u64 cur_inode_size;
	u64 cur_inode_mode;
	u64 cur_inode_rdev;
	u64 cur_inode_last_extent;
	u64 cur_inode_next_write_offset;
	bool ignore_cur_inode;

	u64 send_progress;

	struct list_head new_refs;
	struct list_head deleted_refs;

	struct radix_tree_root name_cache;
	struct list_head name_cache_list;
	int name_cache_size;

	struct file_ra_state ra;

	char *read_buf;

	/*
	 * We process inodes by their increasing order, so if before an
	 * incremental send we reverse the parent/child relationship of
	 * directories such that a directory with a lower inode number was
	 * the parent of a directory with a higher inode number, and the one
	 * becoming the new parent got renamed too, we can't rename/move the
	 * directory with lower inode number when we finish processing it - we
	 * must process the directory with higher inode number first, then
	 * rename/move it and then rename/move the directory with lower inode
	 * number. Example follows.
	 *
	 * Tree state when the first send was performed:
	 *
	 * .
	 * |-- a                   (ino 257)
	 *     |-- b               (ino 258)
	 *         |-- c           (ino 259)
	 *         |   |-- d       (ino 260)
	 *         |
	 *         |-- c2          (ino 261)
	 *
	 * Tree state when the second (incremental) send is performed:
	 *
	 * .
	 * |-- a                   (ino 257)
	 *     |-- b               (ino 258)
	 *         |-- c2          (ino 261)
	 *             |-- d2      (ino 260)
	 *                 |-- cc  (ino 259)
	 *
	 * The sequence of steps that lead to the second state was:
	 *
	 * mv /a/b/c/d /a/b/c2/d2
	 * mv /a/b/c /a/b/c2/d2/cc
	 *
	 * "c" has lower inode number, but we can't move it (2nd mv operation)
	 * before we move "d", which has higher inode number.
	 *
	 * So we just memorize which move/rename operations must be performed
	 * later when their respective parent is processed and moved/renamed.
	 */

	/* Indexed by parent directory inode number. */
	struct rb_root pending_dir_moves;

	/*
	 * Reverse index, indexed by the inode number of a directory that
	 * is waiting for the move/rename of its immediate parent before its
	 * own move/rename can be performed.
	 */
	struct rb_root waiting_dir_moves;

	/*
	 * A directory that is going to be rm'ed might have a child directory
	 * which is in the pending directory moves index above. In this case,
	 * the directory can only be removed after the move/rename of its child
	 * is performed. Example:
	 *
	 * Sequence of steps that lead to the send snapshot:
	 * rm -f /a/b/c/foo.txt
	 * mv /a/b/y /a/b/YY
	 * mv /a/b/c/x /a/b/YY
	 * rmdir /a/b/c
	 *
	 * When the child is processed, its move/rename is delayed until its
	 * parent is processed (as explained above), but all other operations
	 * like update utimes, chown, chgrp, etc, are performed and the paths
	 * that it uses for those operations must use the orphanized name of
	 * its parent (the directory we're going to rm later), so we need to
	 * memorize that name.
	 *
	 * Indexed by the inode number of the directory to be deleted.
	 */
	struct rb_root orphan_dirs;
};

struct pending_dir_move {
	struct list_head list;
	u64 parent_ino;
	u64 ino;
	u64 gen;
	struct list_head update_refs;
};

struct waiting_dir_move {
	struct rb_node node;
	u64 ino;
	/*
	 * There might be some directory that could not be removed because it
	 * was waiting for this directory inode to be moved first. Therefore
	 * after this directory is moved, we can try to rmdir the ino rmdir_ino.
	 */
	u64 rmdir_ino;
	bool orphanized;
};

struct orphan_dir_info {
	struct rb_node node;
	u64 ino;
	u64 gen;
	u64 last_dir_index_offset;
};

struct name_cache_entry {
	struct list_head list;
	/*
	 * radix_tree has only 32bit entries but we need to handle 64bit inums.
	 * We use the lower 32bit of the 64bit inum to store it in the tree. If
	 * more than one inum would fall into the same entry, we use radix_list
	 * to store the additional entries. radix_list is also used to store
	 * entries where two entries have the same inum but different
	 * generations.
	 */
	struct list_head radix_list;
	u64 ino;
	u64 gen;
	u64 parent_ino;
	u64 parent_gen;
	int ret;
	int need_later_update;
	int name_len;
	char name[];
};

static void inconsistent_snapshot_error(struct send_ctx *sctx,
					enum btrfs_compare_tree_result result,
					const char *what)
{
	const char *result_string;

	switch (result) {
	case BTRFS_COMPARE_TREE_NEW:
		result_string = "new";
		break;
	case BTRFS_COMPARE_TREE_DELETED:
		result_string = "deleted";
		break;
	case BTRFS_COMPARE_TREE_CHANGED:
		result_string = "updated";
		break;
	case BTRFS_COMPARE_TREE_SAME:
		ASSERT(0);
		result_string = "unchanged";
		break;
	default:
		ASSERT(0);
		result_string = "unexpected";
	}

	btrfs_err(sctx->send_root->fs_info,
		  "Send: inconsistent snapshot, found %s %s for inode %llu without updated inode item, send root is %llu, parent root is %llu",
		  result_string, what, sctx->cmp_key->objectid,
		  sctx->send_root->root_key.objectid,
		  (sctx->parent_root ?
		   sctx->parent_root->root_key.objectid : 0));
}

static int is_waiting_for_move(struct send_ctx *sctx, u64 ino);

static struct waiting_dir_move *
get_waiting_dir_move(struct send_ctx *sctx, u64 ino);

static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino);

static int need_send_hole(struct send_ctx *sctx)
{
	return (sctx->parent_root && !sctx->cur_inode_new &&
		!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted &&
		S_ISREG(sctx->cur_inode_mode));
}

static void fs_path_reset(struct fs_path *p)
{
	if (p->reversed) {
		p->start = p->buf + p->buf_len - 1;
		p->end = p->start;
		*p->start = 0;
	} else {
		p->start = p->buf;
		p->end = p->start;
		*p->start = 0;
	}
}

static struct fs_path *fs_path_alloc(void)
{
	struct fs_path *p;

	p = kmalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return NULL;
	p->reversed = 0;
	p->buf = p->inline_buf;
	p->buf_len = FS_PATH_INLINE_SIZE;
	fs_path_reset(p);
	return p;
}

static struct fs_path *fs_path_alloc_reversed(void)
{
	struct fs_path *p;

	p = fs_path_alloc();
	if (!p)
		return NULL;
	p->reversed = 1;
	fs_path_reset(p);
	return p;
}

static void fs_path_free(struct fs_path *p)
{
	if (!p)
		return;
	if (p->buf != p->inline_buf)
		kfree(p->buf);
	kfree(p);
}

static int fs_path_len(struct fs_path *p)
{
	return p->end - p->start;
}

static int fs_path_ensure_buf(struct fs_path *p, int len)
{
	char *tmp_buf;
	int path_len;
	int old_buf_len;

	len++;

	if (p->buf_len >= len)
		return 0;

	if (len > PATH_MAX) {
		WARN_ON(1);
		return -ENOMEM;
	}

	path_len = p->end - p->start;
	old_buf_len = p->buf_len;

	/*
	 * First time the inline_buf does not suffice
	 */
	if (p->buf == p->inline_buf) {
		tmp_buf = kmalloc(len, GFP_KERNEL);
		if (tmp_buf)
			memcpy(tmp_buf, p->buf, old_buf_len);
	} else {
		tmp_buf = krealloc(p->buf, len, GFP_KERNEL);
	}
	if (!tmp_buf)
		return -ENOMEM;
	p->buf = tmp_buf;
	/*
	 * The real size of the buffer is bigger, this will let the fast path
	 * happen most of the time
	 */
	p->buf_len = ksize(p->buf);

	if (p->reversed) {
		tmp_buf = p->buf + old_buf_len - path_len - 1;
		p->end = p->buf + p->buf_len - 1;
		p->start = p->end - path_len;
		memmove(p->start, tmp_buf, path_len + 1);
	} else {
		p->start = p->buf;
		p->end = p->start + path_len;
	}
	return 0;
}
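
/*
 * Worked example of the reversed-buffer relocation above (illustrative): for
 * a reversed path holding "b/c" (path_len 3) in a 64 byte buffer, the string
 * sits at offset 60 (old_buf_len - path_len - 1). After growing to a 128 byte
 * buffer, start moves to offset 124 and the 4 bytes "b/c\0" are copied there,
 * so future prepends keep extending to the left of the larger buffer.
 */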

static int fs_path_prepare_for_add(struct fs_path *p, int name_len,
				   char **prepared)
{
	int ret;
	int new_len;

	new_len = p->end - p->start + name_len;
	if (p->start != p->end)
		new_len++;
	ret = fs_path_ensure_buf(p, new_len);
	if (ret < 0)
		goto out;

	if (p->reversed) {
		if (p->start != p->end)
			*--p->start = '/';
		p->start -= name_len;
		*prepared = p->start;
	} else {
		if (p->start != p->end)
			*p->end++ = '/';
		*prepared = p->end;
		p->end += name_len;
		*p->end = 0;
	}

out:
	return ret;
}

static int fs_path_add(struct fs_path *p, const char *name, int name_len)
{
	int ret;
	char *prepared;

	ret = fs_path_prepare_for_add(p, name_len, &prepared);
	if (ret < 0)
		goto out;
	memcpy(prepared, name, name_len);

out:
	return ret;
}

static int fs_path_add_path(struct fs_path *p, struct fs_path *p2)
{
	int ret;
	char *prepared;

	ret = fs_path_prepare_for_add(p, p2->end - p2->start, &prepared);
	if (ret < 0)
		goto out;
	memcpy(prepared, p2->start, p2->end - p2->start);

out:
	return ret;
}

static int fs_path_add_from_extent_buffer(struct fs_path *p,
					  struct extent_buffer *eb,
					  unsigned long off, int len)
{
	int ret;
	char *prepared;

	ret = fs_path_prepare_for_add(p, len, &prepared);
	if (ret < 0)
		goto out;

	read_extent_buffer(eb, prepared, off, len);

out:
	return ret;
}

static int fs_path_copy(struct fs_path *p, struct fs_path *from)
{
	int ret;

	p->reversed = from->reversed;
	fs_path_reset(p);

	ret = fs_path_add_path(p, from);

	return ret;
}

static void fs_path_unreverse(struct fs_path *p)
{
	char *tmp;
	int len;

	if (!p->reversed)
		return;

	tmp = p->start;
	len = p->end - p->start;
	p->start = p->buf;
	p->end = p->start + len;
	memmove(p->start, tmp, len + 1);
	p->reversed = 0;
}
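
/*
 * Usage sketch (illustrative only, fs_path_example is a hypothetical helper):
 * paths are built leaf-to-root with a reversed fs_path and then unreversed
 * once complete, which is exactly what get_cur_path() does further below.
 */
static inline int fs_path_example(struct fs_path *p)
{
	int ret;

	/* p came from fs_path_alloc_reversed(), so each add prepends */
	ret = fs_path_add(p, "c", 1);		/* path is now "c"     */
	if (!ret)
		ret = fs_path_add(p, "b", 1);	/* path is now "b/c"   */
	if (!ret)
		ret = fs_path_add(p, "a", 1);	/* path is now "a/b/c" */
	if (!ret)
		fs_path_unreverse(p);	/* normal left-to-right buffer */
	return ret;
}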

static struct btrfs_path *alloc_path_for_send(void)
{
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return NULL;
	path->search_commit_root = 1;
	path->skip_locking = 1;
	path->need_commit_sem = 1;
	return path;
}

static int write_buf(struct file *filp, const void *buf, u32 len, loff_t *off)
{
	int ret;
	u32 pos = 0;

	while (pos < len) {
		ret = kernel_write(filp, buf + pos, len - pos, off);
		/* TODO handle that correctly */
		/*if (ret == -ERESTARTSYS) {
			continue;
		}*/
		if (ret < 0)
			return ret;
		if (ret == 0)
			return -EIO;
		pos += ret;
	}

	return 0;
}

static int tlv_put(struct send_ctx *sctx, u16 attr, const void *data, int len)
{
	struct btrfs_tlv_header *hdr;
	int total_len = sizeof(*hdr) + len;
	int left = sctx->send_max_size - sctx->send_size;

	if (unlikely(left < total_len))
		return -EOVERFLOW;

	hdr = (struct btrfs_tlv_header *) (sctx->send_buf + sctx->send_size);
	hdr->tlv_type = cpu_to_le16(attr);
	hdr->tlv_len = cpu_to_le16(len);
	memcpy(hdr + 1, data, len);
	sctx->send_size += total_len;

	return 0;
}

#define TLV_PUT_DEFINE_INT(bits) \
	static int tlv_put_u##bits(struct send_ctx *sctx,		\
				   u##bits attr, u##bits value)		\
	{								\
		__le##bits __tmp = cpu_to_le##bits(value);		\
		return tlv_put(sctx, attr, &__tmp, sizeof(__tmp));	\
	}

TLV_PUT_DEFINE_INT(64)
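
/*
 * For reference, TLV_PUT_DEFINE_INT(64) above expands to:
 *
 *	static int tlv_put_u64(struct send_ctx *sctx, u64 attr, u64 value)
 *	{
 *		__le64 __tmp = cpu_to_le64(value);
 *		return tlv_put(sctx, attr, &__tmp, sizeof(__tmp));
 *	}
 */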

static int tlv_put_string(struct send_ctx *sctx, u16 attr,
			  const char *str, int len)
{
	if (len == -1)
		len = strlen(str);
	return tlv_put(sctx, attr, str, len);
}

static int tlv_put_uuid(struct send_ctx *sctx, u16 attr,
			const u8 *uuid)
{
	return tlv_put(sctx, attr, uuid, BTRFS_UUID_SIZE);
}

static int tlv_put_btrfs_timespec(struct send_ctx *sctx, u16 attr,
				  struct extent_buffer *eb,
				  struct btrfs_timespec *ts)
{
	struct btrfs_timespec bts;
	read_extent_buffer(eb, &bts, (unsigned long)ts, sizeof(bts));
	return tlv_put(sctx, attr, &bts, sizeof(bts));
}

#define TLV_PUT(sctx, attrtype, data, attrlen) \
	do { \
		ret = tlv_put(sctx, attrtype, data, attrlen); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)

#define TLV_PUT_INT(sctx, attrtype, bits, value) \
	do { \
		ret = tlv_put_u##bits(sctx, attrtype, value); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)

#define TLV_PUT_U8(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 8, data)
#define TLV_PUT_U16(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 16, data)
#define TLV_PUT_U32(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 32, data)
#define TLV_PUT_U64(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 64, data)
#define TLV_PUT_STRING(sctx, attrtype, str, len) \
	do { \
		ret = tlv_put_string(sctx, attrtype, str, len); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
#define TLV_PUT_PATH(sctx, attrtype, p) \
	do { \
		ret = tlv_put_string(sctx, attrtype, p->start, \
				     p->end - p->start); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
#define TLV_PUT_UUID(sctx, attrtype, uuid) \
	do { \
		ret = tlv_put_uuid(sctx, attrtype, uuid); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
#define TLV_PUT_BTRFS_TIMESPEC(sctx, attrtype, eb, ts) \
	do { \
		ret = tlv_put_btrfs_timespec(sctx, attrtype, eb, ts); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)

static int send_header(struct send_ctx *sctx)
{
	struct btrfs_stream_header hdr;

	strcpy(hdr.magic, BTRFS_SEND_STREAM_MAGIC);
	hdr.version = cpu_to_le32(BTRFS_SEND_STREAM_VERSION);

	return write_buf(sctx->send_filp, &hdr, sizeof(hdr),
					&sctx->send_off);
}

/*
 * For each command/item we want to send to userspace, we call this function.
 */
static int begin_cmd(struct send_ctx *sctx, int cmd)
{
	struct btrfs_cmd_header *hdr;

	if (WARN_ON(!sctx->send_buf))
		return -EINVAL;

	BUG_ON(sctx->send_size);

	sctx->send_size += sizeof(*hdr);
	hdr = (struct btrfs_cmd_header *)sctx->send_buf;
	hdr->cmd = cpu_to_le16(cmd);

	return 0;
}

static int send_cmd(struct send_ctx *sctx)
{
	int ret;
	struct btrfs_cmd_header *hdr;
	u32 crc;

	hdr = (struct btrfs_cmd_header *)sctx->send_buf;
	hdr->len = cpu_to_le32(sctx->send_size - sizeof(*hdr));
	hdr->crc = 0;

	crc = crc32c(0, (unsigned char *)sctx->send_buf, sctx->send_size);
	hdr->crc = cpu_to_le32(crc);

	ret = write_buf(sctx->send_filp, sctx->send_buf, sctx->send_size,
					&sctx->send_off);

	sctx->total_send_size += sctx->send_size;
	sctx->cmd_send_size[le16_to_cpu(hdr->cmd)] += sctx->send_size;
	sctx->send_size = 0;

	return ret;
}
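
/*
 * On-wire layout produced by begin_cmd()/tlv_put*()/send_cmd() (sketch):
 * a btrfs_cmd_header (le32 len of the attribute payload, le16 cmd, le32 crc)
 * followed by a sequence of btrfs_tlv_header + data attribute pairs. The
 * crc32c covers the whole command with the crc field itself zeroed.
 */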

/*
 * Sends a move instruction to user space
 */
static int send_rename(struct send_ctx *sctx,
		     struct fs_path *from, struct fs_path *to)
{
	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
	int ret;

	btrfs_debug(fs_info, "send_rename %s -> %s", from->start, to->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_RENAME);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, from);
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_TO, to);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}

/*
 * Sends a link instruction to user space
 */
static int send_link(struct send_ctx *sctx,
		     struct fs_path *path, struct fs_path *lnk)
{
	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
	int ret;

	btrfs_debug(fs_info, "send_link %s -> %s", path->start, lnk->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_LINK);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, lnk);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}

/*
 * Sends an unlink instruction to user space
 */
static int send_unlink(struct send_ctx *sctx, struct fs_path *path)
{
	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
	int ret;

	btrfs_debug(fs_info, "send_unlink %s", path->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_UNLINK);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}

/*
 * Sends a rmdir instruction to user space
 */
static int send_rmdir(struct send_ctx *sctx, struct fs_path *path)
{
	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
	int ret;

	btrfs_debug(fs_info, "send_rmdir %s", path->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_RMDIR);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}

/*
 * Helper function to retrieve some fields from an inode item.
 */
static int __get_inode_info(struct btrfs_root *root, struct btrfs_path *path,
			  u64 ino, u64 *size, u64 *gen, u64 *mode, u64 *uid,
			  u64 *gid, u64 *rdev)
{
	int ret;
	struct btrfs_inode_item *ii;
	struct btrfs_key key;

	key.objectid = ino;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		return ret;
	}

	ii = btrfs_item_ptr(path->nodes[0], path->slots[0],
			struct btrfs_inode_item);
	if (size)
		*size = btrfs_inode_size(path->nodes[0], ii);
	if (gen)
		*gen = btrfs_inode_generation(path->nodes[0], ii);
	if (mode)
		*mode = btrfs_inode_mode(path->nodes[0], ii);
	if (uid)
		*uid = btrfs_inode_uid(path->nodes[0], ii);
	if (gid)
		*gid = btrfs_inode_gid(path->nodes[0], ii);
	if (rdev)
		*rdev = btrfs_inode_rdev(path->nodes[0], ii);

	return ret;
}

static int get_inode_info(struct btrfs_root *root,
			  u64 ino, u64 *size, u64 *gen,
			  u64 *mode, u64 *uid, u64 *gid,
			  u64 *rdev)
{
	struct btrfs_path *path;
	int ret;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;
	ret = __get_inode_info(root, path, ino, size, gen, mode, uid, gid,
			       rdev);
	btrfs_free_path(path);
	return ret;
}

typedef int (*iterate_inode_ref_t)(int num, u64 dir, int index,
				   struct fs_path *p,
				   void *ctx);

/*
 * Helper function to iterate the entries in ONE btrfs_inode_ref or
 * btrfs_inode_extref.
 * The iterate callback may return a non zero value to stop iteration. This can
 * be a negative value for error codes or 1 to simply stop it.
 *
 * path must point to the INODE_REF or INODE_EXTREF when called.
 */
static int iterate_inode_ref(struct btrfs_root *root, struct btrfs_path *path,
			     struct btrfs_key *found_key, int resolve,
			     iterate_inode_ref_t iterate, void *ctx)
{
	struct extent_buffer *eb = path->nodes[0];
	struct btrfs_item *item;
	struct btrfs_inode_ref *iref;
	struct btrfs_inode_extref *extref;
	struct btrfs_path *tmp_path;
	struct fs_path *p;
	u32 cur = 0;
	u32 total;
	int slot = path->slots[0];
	u32 name_len;
	char *start;
	int ret = 0;
	int num = 0;
	int index;
	u64 dir;
	unsigned long name_off;
	unsigned long elem_size;
	unsigned long ptr;

	p = fs_path_alloc_reversed();
	if (!p)
		return -ENOMEM;

	tmp_path = alloc_path_for_send();
	if (!tmp_path) {
		fs_path_free(p);
		return -ENOMEM;
	}


	if (found_key->type == BTRFS_INODE_REF_KEY) {
		ptr = (unsigned long)btrfs_item_ptr(eb, slot,
						    struct btrfs_inode_ref);
		item = btrfs_item_nr(slot);
		total = btrfs_item_size(eb, item);
		elem_size = sizeof(*iref);
	} else {
		ptr = btrfs_item_ptr_offset(eb, slot);
		total = btrfs_item_size_nr(eb, slot);
		elem_size = sizeof(*extref);
	}

	while (cur < total) {
		fs_path_reset(p);

		if (found_key->type == BTRFS_INODE_REF_KEY) {
			iref = (struct btrfs_inode_ref *)(ptr + cur);
			name_len = btrfs_inode_ref_name_len(eb, iref);
			name_off = (unsigned long)(iref + 1);
			index = btrfs_inode_ref_index(eb, iref);
			dir = found_key->offset;
		} else {
			extref = (struct btrfs_inode_extref *)(ptr + cur);
			name_len = btrfs_inode_extref_name_len(eb, extref);
			name_off = (unsigned long)&extref->name;
			index = btrfs_inode_extref_index(eb, extref);
			dir = btrfs_inode_extref_parent(eb, extref);
		}

		if (resolve) {
			start = btrfs_ref_to_path(root, tmp_path, name_len,
						  name_off, eb, dir,
						  p->buf, p->buf_len);
			if (IS_ERR(start)) {
				ret = PTR_ERR(start);
				goto out;
			}
			if (start < p->buf) {
				/* overflow, try again with a larger buffer */
				ret = fs_path_ensure_buf(p,
						p->buf_len + p->buf - start);
				if (ret < 0)
					goto out;
				start = btrfs_ref_to_path(root, tmp_path,
							  name_len, name_off,
							  eb, dir,
							  p->buf, p->buf_len);
				if (IS_ERR(start)) {
					ret = PTR_ERR(start);
					goto out;
				}
				BUG_ON(start < p->buf);
			}
			p->start = start;
		} else {
			ret = fs_path_add_from_extent_buffer(p, eb, name_off,
							     name_len);
			if (ret < 0)
				goto out;
		}

		cur += elem_size + name_len;
		ret = iterate(num, dir, index, p, ctx);
		if (ret)
			goto out;
		num++;
	}

out:
	btrfs_free_path(tmp_path);
	fs_path_free(p);
	return ret;
}

typedef int (*iterate_dir_item_t)(int num, struct btrfs_key *di_key,
				  const char *name, int name_len,
				  const char *data, int data_len,
				  u8 type, void *ctx);

/*
 * Helper function to iterate the entries in ONE btrfs_dir_item.
 * The iterate callback may return a non zero value to stop iteration. This can
 * be a negative value for error codes or 1 to simply stop it.
 *
 * path must point to the dir item when called.
 */
static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path,
			    iterate_dir_item_t iterate, void *ctx)
{
	int ret = 0;
	struct extent_buffer *eb;
	struct btrfs_item *item;
	struct btrfs_dir_item *di;
	struct btrfs_key di_key;
	char *buf = NULL;
	int buf_len;
	u32 name_len;
	u32 data_len;
	u32 cur;
	u32 len;
	u32 total;
	int slot;
	int num;
	u8 type;

	/*
	 * Start with a small buffer (1 page). If later we end up needing more
	 * space, which can happen for xattrs on a fs with a leaf size greater
	 * than the page size, attempt to increase the buffer. Typically xattr
	 * values are small.
	 */
	buf_len = PATH_MAX;
	buf = kmalloc(buf_len, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out;
	}

	eb = path->nodes[0];
	slot = path->slots[0];
	item = btrfs_item_nr(slot);
	di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
	cur = 0;
	len = 0;
	total = btrfs_item_size(eb, item);

	num = 0;
	while (cur < total) {
		name_len = btrfs_dir_name_len(eb, di);
		data_len = btrfs_dir_data_len(eb, di);
		type = btrfs_dir_type(eb, di);
		btrfs_dir_item_key_to_cpu(eb, di, &di_key);

		if (type == BTRFS_FT_XATTR) {
			if (name_len > XATTR_NAME_MAX) {
				ret = -ENAMETOOLONG;
				goto out;
			}
			if (name_len + data_len >
					BTRFS_MAX_XATTR_SIZE(root->fs_info)) {
				ret = -E2BIG;
				goto out;
			}
		} else {
			/*
			 * Path too long
			 */
			if (name_len + data_len > PATH_MAX) {
				ret = -ENAMETOOLONG;
				goto out;
			}
		}

		if (name_len + data_len > buf_len) {
			buf_len = name_len + data_len;
			if (is_vmalloc_addr(buf)) {
				vfree(buf);
				buf = NULL;
			} else {
				char *tmp = krealloc(buf, buf_len,
						GFP_KERNEL | __GFP_NOWARN);

				if (!tmp)
					kfree(buf);
				buf = tmp;
			}
			if (!buf) {
				buf = kvmalloc(buf_len, GFP_KERNEL);
				if (!buf) {
					ret = -ENOMEM;
					goto out;
				}
			}
		}

		read_extent_buffer(eb, buf, (unsigned long)(di + 1),
				name_len + data_len);

		len = sizeof(*di) + name_len + data_len;
		di = (struct btrfs_dir_item *)((char *)di + len);
		cur += len;

		ret = iterate(num, &di_key, buf, name_len, buf + name_len,
				data_len, type, ctx);
		if (ret < 0)
			goto out;
		if (ret) {
			ret = 0;
			goto out;
		}

		num++;
	}

out:
	kvfree(buf);
	return ret;
}

static int __copy_first_ref(int num, u64 dir, int index,
			    struct fs_path *p, void *ctx)
{
	int ret;
	struct fs_path *pt = ctx;

	ret = fs_path_copy(pt, p);
	if (ret < 0)
		return ret;

	/* we want the first only */
	return 1;
}

/*
 * Retrieve the first path of an inode. If an inode has more than one
 * ref/hardlink, this is ignored.
 */
static int get_inode_path(struct btrfs_root *root,
			  u64 ino, struct fs_path *path)
{
	int ret;
	struct btrfs_key key, found_key;
	struct btrfs_path *p;

	p = alloc_path_for_send();
	if (!p)
		return -ENOMEM;

	fs_path_reset(path);

	key.objectid = ino;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = 0;

	ret = btrfs_search_slot_for_read(root, &key, p, 1, 0);
	if (ret < 0)
		goto out;
	if (ret) {
		ret = 1;
		goto out;
	}
	btrfs_item_key_to_cpu(p->nodes[0], &found_key, p->slots[0]);
	if (found_key.objectid != ino ||
	    (found_key.type != BTRFS_INODE_REF_KEY &&
	     found_key.type != BTRFS_INODE_EXTREF_KEY)) {
		ret = -ENOENT;
		goto out;
	}

	ret = iterate_inode_ref(root, p, &found_key, 1,
				__copy_first_ref, path);
	if (ret < 0)
		goto out;
	ret = 0;

out:
	btrfs_free_path(p);
	return ret;
}

struct backref_ctx {
	struct send_ctx *sctx;

	struct btrfs_path *path;
	/* number of total found references */
	u64 found;

	/*
	 * used for clones found in send_root. clones found behind cur_objectid
	 * and cur_offset are not considered as allowed clones.
	 */
	u64 cur_objectid;
	u64 cur_offset;

	/* may be truncated in case it's the last extent in a file */
	u64 extent_len;

	/* data offset in the file extent item */
	u64 data_offset;

	/* Just to check for bugs in backref resolving */
	int found_itself;
};

static int __clone_root_cmp_bsearch(const void *key, const void *elt)
{
	u64 root = (u64)(uintptr_t)key;
	struct clone_root *cr = (struct clone_root *)elt;

	if (root < cr->root->objectid)
		return -1;
	if (root > cr->root->objectid)
		return 1;
	return 0;
}

static int __clone_root_cmp_sort(const void *e1, const void *e2)
{
	struct clone_root *cr1 = (struct clone_root *)e1;
	struct clone_root *cr2 = (struct clone_root *)e2;

	if (cr1->root->objectid < cr2->root->objectid)
		return -1;
	if (cr1->root->objectid > cr2->root->objectid)
		return 1;
	return 0;
}

/*
 * Called for every backref that is found for the current extent.
 * Results are collected in sctx->clone_roots->ino/offset/found_refs
 */
static int __iterate_backrefs(u64 ino, u64 offset, u64 root, void *ctx_)
{
	struct backref_ctx *bctx = ctx_;
	struct clone_root *found;
	int ret;
	u64 i_size;

	/* First check if the root is in the list of accepted clone sources */
	found = bsearch((void *)(uintptr_t)root, bctx->sctx->clone_roots,
			bctx->sctx->clone_roots_cnt,
			sizeof(struct clone_root),
			__clone_root_cmp_bsearch);
	if (!found)
		return 0;

	if (found->root == bctx->sctx->send_root &&
	    ino == bctx->cur_objectid &&
	    offset == bctx->cur_offset) {
		bctx->found_itself = 1;
	}

	/*
	 * There are inodes that have extents that lie behind their i_size.
	 * Don't accept clones from these extents.
	 */
	ret = __get_inode_info(found->root, bctx->path, ino, &i_size, NULL,
			       NULL, NULL, NULL, NULL);
	btrfs_release_path(bctx->path);
	if (ret < 0)
		return ret;

	if (offset + bctx->data_offset + bctx->extent_len > i_size)
		return 0;

	/*
	 * Make sure we don't consider clones from send_root that are
	 * behind the current inode/offset.
	 */
	if (found->root == bctx->sctx->send_root) {
		/*
		 * TODO for the moment we don't accept clones from the inode
		 * that is currently being sent. We may change this when
		 * BTRFS_IOC_CLONE_RANGE supports cloning from and to the same
		 * file.
		 */
		if (ino >= bctx->cur_objectid)
			return 0;
	}

	bctx->found++;
	found->found_refs++;
	if (ino < found->ino) {
		found->ino = ino;
		found->offset = offset;
	} else if (found->ino == ino) {
		/*
		 * same extent found more than once in the same file.
		 */
		if (found->offset > offset + bctx->extent_len)
			found->offset = offset;
	}

	return 0;
}
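
/*
 * Example of the bookkeeping above (sketch): if the same extent is referenced
 * by inode 258 at file offsets 0 and 64K within one clone root, the first
 * backref records ino 258 / offset 0, and the later backref at 64K leaves the
 * recorded offset alone because the existing one already starts low enough to
 * cover the extent.
 */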

/*
 * Given an inode, offset and extent item, it finds a good clone for a clone
 * instruction. Returns -ENOENT when none could be found. The function makes
 * sure that the returned clone is usable at the point where sending is at the
 * moment. This means, that no clones are accepted which lie behind the current
 * inode+offset.
 *
 * path must point to the extent item when called.
 */
static int find_extent_clone(struct send_ctx *sctx,
			     struct btrfs_path *path,
			     u64 ino, u64 data_offset,
			     u64 ino_size,
			     struct clone_root **found)
{
	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
	int ret;
	int extent_type;
	u64 logical;
	u64 disk_byte;
	u64 num_bytes;
	u64 extent_item_pos;
	u64 flags = 0;
	struct btrfs_file_extent_item *fi;
	struct extent_buffer *eb = path->nodes[0];
	struct backref_ctx *backref_ctx = NULL;
	struct clone_root *cur_clone_root;
	struct btrfs_key found_key;
	struct btrfs_path *tmp_path;
	int compressed;
	u32 i;

	tmp_path = alloc_path_for_send();
	if (!tmp_path)
		return -ENOMEM;

	/* We only use this path under the commit sem */
	tmp_path->need_commit_sem = 0;

	backref_ctx = kmalloc(sizeof(*backref_ctx), GFP_KERNEL);
	if (!backref_ctx) {
		ret = -ENOMEM;
		goto out;
	}

	backref_ctx->path = tmp_path;

	if (data_offset >= ino_size) {
		/*
		 * There may be extents that lie behind the file's size.
		 * I at least had this in combination with snapshotting while
		 * writing large files.
		 */
		ret = 0;
		goto out;
	}

	fi = btrfs_item_ptr(eb, path->slots[0],
			struct btrfs_file_extent_item);
	extent_type = btrfs_file_extent_type(eb, fi);
	if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
		ret = -ENOENT;
		goto out;
	}
	compressed = btrfs_file_extent_compression(eb, fi);

	num_bytes = btrfs_file_extent_num_bytes(eb, fi);
	disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
	if (disk_byte == 0) {
		ret = -ENOENT;
		goto out;
	}
	logical = disk_byte + btrfs_file_extent_offset(eb, fi);

	down_read(&fs_info->commit_root_sem);
	ret = extent_from_logical(fs_info, disk_byte, tmp_path,
				  &found_key, &flags);
	up_read(&fs_info->commit_root_sem);
	btrfs_release_path(tmp_path);

	if (ret < 0)
		goto out;
	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		ret = -EIO;
		goto out;
	}

	/*
	 * Setup the clone roots.
	 */
	for (i = 0; i < sctx->clone_roots_cnt; i++) {
		cur_clone_root = sctx->clone_roots + i;
		cur_clone_root->ino = (u64)-1;
		cur_clone_root->offset = 0;
		cur_clone_root->found_refs = 0;
	}

	backref_ctx->sctx = sctx;
	backref_ctx->found = 0;
	backref_ctx->cur_objectid = ino;
	backref_ctx->cur_offset = data_offset;
	backref_ctx->found_itself = 0;
	backref_ctx->extent_len = num_bytes;
	/*
	 * For non-compressed extents iterate_extent_inodes() gives us extent
	 * offsets that already take into account the data offset, but not for
	 * compressed extents, since the offset is logical and not relative to
	 * the physical extent locations. We must take this into account to
	 * avoid sending clone offsets that go beyond the source file's size,
	 * which would result in the clone ioctl failing with -EINVAL on the
	 * receiving end.
	 */
	if (compressed == BTRFS_COMPRESS_NONE)
		backref_ctx->data_offset = 0;
	else
		backref_ctx->data_offset = btrfs_file_extent_offset(eb, fi);

	/*
	 * The last extent of a file may be too large due to page alignment.
	 * We need to adjust extent_len in this case so that the checks in
	 * __iterate_backrefs work.
	 */
	if (data_offset + num_bytes >= ino_size)
		backref_ctx->extent_len = ino_size - data_offset;

	/*
	 * Now collect all backrefs.
	 */
	if (compressed == BTRFS_COMPRESS_NONE)
		extent_item_pos = logical - found_key.objectid;
	else
		extent_item_pos = 0;
	ret = iterate_extent_inodes(fs_info, found_key.objectid,
				    extent_item_pos, 1, __iterate_backrefs,
				    backref_ctx, false);

	if (ret < 0)
		goto out;

	if (!backref_ctx->found_itself) {
		/* found a bug in backref code? */
		ret = -EIO;
		btrfs_err(fs_info,
			  "did not find backref in send_root. inode=%llu, offset=%llu, disk_byte=%llu found extent=%llu",
			  ino, data_offset, disk_byte, found_key.objectid);
		goto out;
	}

	btrfs_debug(fs_info,
		    "find_extent_clone: data_offset=%llu, ino=%llu, num_bytes=%llu, logical=%llu",
		    data_offset, ino, num_bytes, logical);

	if (!backref_ctx->found)
		btrfs_debug(fs_info, "no clones found");

	cur_clone_root = NULL;
	for (i = 0; i < sctx->clone_roots_cnt; i++) {
		if (sctx->clone_roots[i].found_refs) {
			if (!cur_clone_root)
				cur_clone_root = sctx->clone_roots + i;
			else if (sctx->clone_roots[i].root == sctx->send_root)
				/* prefer clones from send_root over others */
				cur_clone_root = sctx->clone_roots + i;
		}
	}

	if (cur_clone_root) {
		*found = cur_clone_root;
		ret = 0;
	} else {
		ret = -ENOENT;
	}

out:
	btrfs_free_path(tmp_path);
	kfree(backref_ctx);
	return ret;
}

static int read_symlink(struct btrfs_root *root,
			u64 ino,
			struct fs_path *dest)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_file_extent_item *ei;
	u8 type;
	u8 compression;
	unsigned long off;
	int len;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret) {
		/*
		 * An empty symlink inode. Can happen in rare error paths when
		 * creating a symlink (transaction committed before the inode
		 * eviction handler removed the symlink inode items and a crash
		 * happened in between or the subvol was snapshotted in
		 * between). Print an informative message to dmesg/syslog so
		 * that the user can delete the symlink.
		 */
		btrfs_err(root->fs_info,
			  "Found empty symlink inode %llu at root %llu",
			  ino, root->root_key.objectid);
		ret = -EIO;
		goto out;
	}

	ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
			struct btrfs_file_extent_item);
	type = btrfs_file_extent_type(path->nodes[0], ei);
	compression = btrfs_file_extent_compression(path->nodes[0], ei);
	BUG_ON(type != BTRFS_FILE_EXTENT_INLINE);
	BUG_ON(compression);

	off = btrfs_file_extent_inline_start(ei);
	len = btrfs_file_extent_ram_bytes(path->nodes[0], ei);

	ret = fs_path_add_from_extent_buffer(dest, path->nodes[0], off, len);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Helper function to generate a file name that is unique in the root of
 * send_root and parent_root. This is used to generate names for orphan inodes.
 */
static int gen_unique_name(struct send_ctx *sctx,
			   u64 ino, u64 gen,
			   struct fs_path *dest)
{
	int ret = 0;
	struct btrfs_path *path;
	struct btrfs_dir_item *di;
	char tmp[64];
	int len;
	u64 idx = 0;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	while (1) {
		len = snprintf(tmp, sizeof(tmp), "o%llu-%llu-%llu",
				ino, gen, idx);
		ASSERT(len < sizeof(tmp));

		di = btrfs_lookup_dir_item(NULL, sctx->send_root,
				path, BTRFS_FIRST_FREE_OBJECTID,
				tmp, strlen(tmp), 0);
		btrfs_release_path(path);
		if (IS_ERR(di)) {
			ret = PTR_ERR(di);
			goto out;
		}
		if (di) {
			/* not unique, try again */
			idx++;
			continue;
		}

		if (!sctx->parent_root) {
			/* unique */
			ret = 0;
			break;
		}

		di = btrfs_lookup_dir_item(NULL, sctx->parent_root,
				path, BTRFS_FIRST_FREE_OBJECTID,
				tmp, strlen(tmp), 0);
		btrfs_release_path(path);
		if (IS_ERR(di)) {
			ret = PTR_ERR(di);
			goto out;
		}
		if (di) {
			/* not unique, try again */
			idx++;
			continue;
		}
		/* unique */
		break;
	}

	ret = fs_path_add(dest, tmp, strlen(tmp));

out:
	btrfs_free_path(path);
	return ret;
}
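
/*
 * For illustration: inode 261 with generation 7 is first tried as "o261-7-0";
 * if an entry with that name already exists in either root, the index suffix
 * is bumped ("o261-7-1", "o261-7-2", ...) until the name is free in both.
 */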

enum inode_state {
	inode_state_no_change,
	inode_state_will_create,
	inode_state_did_create,
	inode_state_will_delete,
	inode_state_did_delete,
};

static int get_cur_inode_state(struct send_ctx *sctx, u64 ino, u64 gen)
{
	int ret;
	int left_ret;
	int right_ret;
	u64 left_gen;
	u64 right_gen;

	ret = get_inode_info(sctx->send_root, ino, NULL, &left_gen, NULL, NULL,
			NULL, NULL);
	if (ret < 0 && ret != -ENOENT)
		goto out;
	left_ret = ret;

	if (!sctx->parent_root) {
		right_ret = -ENOENT;
	} else {
		ret = get_inode_info(sctx->parent_root, ino, NULL, &right_gen,
				NULL, NULL, NULL, NULL);
		if (ret < 0 && ret != -ENOENT)
			goto out;
		right_ret = ret;
	}

	if (!left_ret && !right_ret) {
		if (left_gen == gen && right_gen == gen) {
			ret = inode_state_no_change;
		} else if (left_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_create;
			else
				ret = inode_state_will_create;
		} else if (right_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_delete;
			else
				ret = inode_state_will_delete;
		} else {
			ret = -ENOENT;
		}
	} else if (!left_ret) {
		if (left_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_create;
			else
				ret = inode_state_will_create;
		} else {
			ret = -ENOENT;
		}
	} else if (!right_ret) {
		if (right_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_delete;
			else
				ret = inode_state_will_delete;
		} else {
			ret = -ENOENT;
		}
	} else {
		ret = -ENOENT;
	}

out:
	return ret;
}
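
/*
 * Summary of the decision above (sketch): "left" is the send root, "right"
 * the parent root. An inode present on both sides with a matching generation
 * is unchanged; present only in the send root it will be (or was) created;
 * present only in the parent root it will be (or was) deleted, with the
 * "did"/"will" variant chosen by comparing ino against sctx->send_progress.
 */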

static int is_inode_existent(struct send_ctx *sctx, u64 ino, u64 gen)
{
	int ret;

	if (ino == BTRFS_FIRST_FREE_OBJECTID)
		return 1;

	ret = get_cur_inode_state(sctx, ino, gen);
	if (ret < 0)
		goto out;

	if (ret == inode_state_no_change ||
	    ret == inode_state_did_create ||
	    ret == inode_state_will_delete)
		ret = 1;
	else
		ret = 0;

out:
	return ret;
}

/*
 * Helper function to lookup a dir item in a dir.
 */
static int lookup_dir_item_inode(struct btrfs_root *root,
				 u64 dir, const char *name, int name_len,
				 u64 *found_inode,
				 u8 *found_type)
{
	int ret = 0;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	struct btrfs_path *path;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	di = btrfs_lookup_dir_item(NULL, root, path,
			dir, name, name_len, 0);
	if (!di) {
		ret = -ENOENT;
		goto out;
	}
	if (IS_ERR(di)) {
		ret = PTR_ERR(di);
		goto out;
	}
	btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
	if (key.type == BTRFS_ROOT_ITEM_KEY) {
		ret = -ENOENT;
		goto out;
	}
	*found_inode = key.objectid;
	*found_type = btrfs_dir_type(path->nodes[0], di);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Looks up the first btrfs_inode_ref of a given ino. It returns the parent
 * dir, generation of the parent dir and the name of the dir entry.
 */
static int get_first_ref(struct btrfs_root *root, u64 ino,
			 u64 *dir, u64 *dir_gen, struct fs_path *name)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;
	int len;
	u64 parent_dir;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = ino;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = 0;

	ret = btrfs_search_slot_for_read(root, &key, path, 1, 0);
	if (ret < 0)
		goto out;
	if (!ret)
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				path->slots[0]);
	if (ret || found_key.objectid != ino ||
	    (found_key.type != BTRFS_INODE_REF_KEY &&
	     found_key.type != BTRFS_INODE_EXTREF_KEY)) {
		ret = -ENOENT;
		goto out;
	}

	if (found_key.type == BTRFS_INODE_REF_KEY) {
		struct btrfs_inode_ref *iref;
		iref = btrfs_item_ptr(path->nodes[0], path->slots[0],
				      struct btrfs_inode_ref);
		len = btrfs_inode_ref_name_len(path->nodes[0], iref);
		ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
						     (unsigned long)(iref + 1),
						     len);
		parent_dir = found_key.offset;
	} else {
		struct btrfs_inode_extref *extref;
		extref = btrfs_item_ptr(path->nodes[0], path->slots[0],
					struct btrfs_inode_extref);
		len = btrfs_inode_extref_name_len(path->nodes[0], extref);
		ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
					(unsigned long)&extref->name, len);
		parent_dir = btrfs_inode_extref_parent(path->nodes[0], extref);
	}
	if (ret < 0)
		goto out;
	btrfs_release_path(path);

	if (dir_gen) {
		ret = get_inode_info(root, parent_dir, NULL, dir_gen, NULL,
				     NULL, NULL, NULL);
		if (ret < 0)
			goto out;
	}

	*dir = parent_dir;

out:
	btrfs_free_path(path);
	return ret;
}

static int is_first_ref(struct btrfs_root *root,
			u64 ino, u64 dir,
			const char *name, int name_len)
{
	int ret;
	struct fs_path *tmp_name;
	u64 tmp_dir;

	tmp_name = fs_path_alloc();
	if (!tmp_name)
		return -ENOMEM;

	ret = get_first_ref(root, ino, &tmp_dir, NULL, tmp_name);
	if (ret < 0)
		goto out;

	if (dir != tmp_dir || name_len != fs_path_len(tmp_name)) {
		ret = 0;
		goto out;
	}

	ret = !memcmp(tmp_name->start, name, name_len);

out:
	fs_path_free(tmp_name);
	return ret;
}

/*
 * Used by process_recorded_refs to determine if a new ref would overwrite an
 * already existing ref. In case it detects an overwrite, it returns the
 * inode/gen in who_ino/who_gen.
 * When an overwrite is detected, process_recorded_refs does proper orphanizing
 * to make sure later references to the overwritten inode are possible.
 * Orphanizing is however only required for the first ref of an inode.
 * process_recorded_refs does an additional is_first_ref check to see if
 * orphanizing is really required.
 */
static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen,
			      const char *name, int name_len,
			      u64 *who_ino, u64 *who_gen, u64 *who_mode)
{
	int ret = 0;
	u64 gen;
	u64 other_inode = 0;
	u8 other_type = 0;

	if (!sctx->parent_root)
		goto out;

	ret = is_inode_existent(sctx, dir, dir_gen);
	if (ret <= 0)
		goto out;

	/*
	 * If we have a parent root we need to verify that the parent dir was
	 * not deleted and then re-created, if it was then we have no overwrite
	 * and we can just unlink this entry.
	 */
	if (sctx->parent_root && dir != BTRFS_FIRST_FREE_OBJECTID) {
		ret = get_inode_info(sctx->parent_root, dir, NULL, &gen, NULL,
				     NULL, NULL, NULL);
		if (ret < 0 && ret != -ENOENT)
			goto out;
		if (ret) {
			ret = 0;
			goto out;
		}
		if (gen != dir_gen)
			goto out;
	}

	ret = lookup_dir_item_inode(sctx->parent_root, dir, name, name_len,
			&other_inode, &other_type);
	if (ret < 0 && ret != -ENOENT)
		goto out;
	if (ret) {
		ret = 0;
		goto out;
	}

	/*
	 * Check if the overwritten ref was already processed. If yes, the ref
	 * was already unlinked/moved, so we can safely assume that we will not
	 * overwrite anything at this point in time.
	 */
	if (other_inode > sctx->send_progress ||
	    is_waiting_for_move(sctx, other_inode)) {
		ret = get_inode_info(sctx->parent_root, other_inode, NULL,
				who_gen, who_mode, NULL, NULL, NULL);
		if (ret < 0)
			goto out;

		ret = 1;
		*who_ino = other_inode;
	} else {
		ret = 0;
	}

out:
	return ret;
}

/*
 * Checks if the ref was overwritten by an already processed inode. This is
 * used by __get_cur_name_and_parent to find out if the ref was orphanized and
 * thus the orphan name needs be used.
 * process_recorded_refs also uses it to avoid unlinking of refs that were
 * overwritten.
 */
static int did_overwrite_ref(struct send_ctx *sctx,
			    u64 dir, u64 dir_gen,
			    u64 ino, u64 ino_gen,
			    const char *name, int name_len)
{
	int ret = 0;
	u64 gen;
	u64 ow_inode;
	u8 other_type;

	if (!sctx->parent_root)
		goto out;

	ret = is_inode_existent(sctx, dir, dir_gen);
	if (ret <= 0)
		goto out;

	if (dir != BTRFS_FIRST_FREE_OBJECTID) {
		ret = get_inode_info(sctx->send_root, dir, NULL, &gen, NULL,
				     NULL, NULL, NULL);
		if (ret < 0 && ret != -ENOENT)
			goto out;
		if (ret) {
			ret = 0;
			goto out;
		}
		if (gen != dir_gen)
			goto out;
	}

	/* check if the ref was overwritten by another ref */
	ret = lookup_dir_item_inode(sctx->send_root, dir, name, name_len,
			&ow_inode, &other_type);
	if (ret < 0 && ret != -ENOENT)
		goto out;
	if (ret) {
		/* was never and will never be overwritten */
		ret = 0;
		goto out;
	}

	ret = get_inode_info(sctx->send_root, ow_inode, NULL, &gen, NULL, NULL,
			NULL, NULL);
	if (ret < 0)
		goto out;

	if (ow_inode == ino && gen == ino_gen) {
		ret = 0;
		goto out;
	}

	/*
	 * We know that it is or will be overwritten. Check this now.
	 * The current inode being processed might have been the one that
	 * caused inode 'ino' to be orphanized, therefore check if ow_inode
	 * matches the current inode being processed.
	 */
	if ((ow_inode < sctx->send_progress) ||
	    (ino != sctx->cur_ino && ow_inode == sctx->cur_ino &&
	     gen == sctx->cur_inode_gen))
		ret = 1;
	else
		ret = 0;

out:
	return ret;
}

/*
 * Same as did_overwrite_ref, but also checks if it is the first ref of an
 * inode that got overwritten. This is used by process_recorded_refs to
 * determine if it has to use the path as returned by get_cur_path or the
 * orphan name.
 */
static int did_overwrite_first_ref(struct send_ctx *sctx, u64 ino, u64 gen)
{
	int ret = 0;
	struct fs_path *name = NULL;
	u64 dir;
	u64 dir_gen;

	if (!sctx->parent_root)
		goto out;

	name = fs_path_alloc();
	if (!name)
		return -ENOMEM;

	ret = get_first_ref(sctx->parent_root, ino, &dir, &dir_gen, name);
	if (ret < 0)
		goto out;

	ret = did_overwrite_ref(sctx, dir, dir_gen, ino, gen,
			name->start, fs_path_len(name));

out:
	fs_path_free(name);
	return ret;
}

/*
 * Insert a name cache entry. On 32bit kernels the radix tree index is 32bit,
 * so we need to do some special handling in case we have clashes. This
 * function takes care of this with the help of name_cache_entry::radix_list.
 * In case of error, nce is kfreed.
 */
static int name_cache_insert(struct send_ctx *sctx,
			     struct name_cache_entry *nce)
{
	int ret = 0;
	struct list_head *nce_head;

	nce_head = radix_tree_lookup(&sctx->name_cache,
			(unsigned long)nce->ino);
	if (!nce_head) {
		nce_head = kmalloc(sizeof(*nce_head), GFP_KERNEL);
		if (!nce_head) {
			kfree(nce);
			return -ENOMEM;
		}
		INIT_LIST_HEAD(nce_head);

		ret = radix_tree_insert(&sctx->name_cache, nce->ino, nce_head);
		if (ret < 0) {
			kfree(nce_head);
			kfree(nce);
			return ret;
		}
	}
	list_add_tail(&nce->radix_list, nce_head);
	list_add_tail(&nce->list, &sctx->name_cache_list);
	sctx->name_cache_size++;

	return ret;
}
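
/*
 * For illustration: on a 32bit kernel, inodes 5 and 5 + 2^32 both land in
 * radix slot 5, and so do two cache entries for inode 5 with different
 * generations; they all end up on the same nce_head list and are told apart
 * by comparing ino and gen when searching.
 */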

static void name_cache_delete(struct send_ctx *sctx,
			      struct name_cache_entry *nce)
{
	struct list_head *nce_head;

	nce_head = radix_tree_lookup(&sctx->name_cache,
			(unsigned long)nce->ino);
	if (!nce_head) {
		btrfs_err(sctx->send_root->fs_info,
	      "name_cache_delete lookup failed ino %llu cache size %d, leaking memory",
			nce->ino, sctx->name_cache_size);
	}

	list_del(&nce->radix_list);
	list_del(&nce->list);
	sctx->name_cache_size--;

	/*
	 * We may not get to the final release of nce_head if the lookup fails
	 */
	if (nce_head && list_empty(nce_head)) {
		radix_tree_delete(&sctx->name_cache, (unsigned long)nce->ino);
		kfree(nce_head);
	}
}

static struct name_cache_entry *name_cache_search(struct send_ctx *sctx,
						    u64 ino, u64 gen)
{
	struct list_head *nce_head;
	struct name_cache_entry *cur;

	nce_head = radix_tree_lookup(&sctx->name_cache, (unsigned long)ino);
	if (!nce_head)
		return NULL;

	list_for_each_entry(cur, nce_head, radix_list) {
		if (cur->ino == ino && cur->gen == gen)
			return cur;
	}
	return NULL;
}

/*
 * Removes the entry from the list and adds it back to the end. This marks the
 * entry as recently used so that name_cache_clean_unused does not remove it.
 */
static void name_cache_used(struct send_ctx *sctx, struct name_cache_entry *nce)
{
	list_del(&nce->list);
	list_add_tail(&nce->list, &sctx->name_cache_list);
}

/*
 * Remove some entries from the beginning of name_cache_list.
 */
static void name_cache_clean_unused(struct send_ctx *sctx)
{
	struct name_cache_entry *nce;

	if (sctx->name_cache_size < SEND_CTX_NAME_CACHE_CLEAN_SIZE)
		return;

	while (sctx->name_cache_size > SEND_CTX_MAX_NAME_CACHE_SIZE) {
		nce = list_entry(sctx->name_cache_list.next,
				struct name_cache_entry, list);
		name_cache_delete(sctx, nce);
		kfree(nce);
	}
}

static void name_cache_free(struct send_ctx *sctx)
{
	struct name_cache_entry *nce;

	while (!list_empty(&sctx->name_cache_list)) {
		nce = list_entry(sctx->name_cache_list.next,
				struct name_cache_entry, list);
		name_cache_delete(sctx, nce);
		kfree(nce);
	}
}

/*
 * Used by get_cur_path for each ref up to the root.
 * Returns 0 if it succeeded.
 * Returns 1 if the inode does not exist or got overwritten. In that case, the
 * name is an orphan name. This instructs get_cur_path to stop iterating. If 1
 * is returned, parent_ino/parent_gen are not guaranteed to be valid.
 * Returns <0 in case of error.
 */
static int __get_cur_name_and_parent(struct send_ctx *sctx,
				     u64 ino, u64 gen,
				     u64 *parent_ino,
				     u64 *parent_gen,
				     struct fs_path *dest)
{
	int ret;
	int nce_ret;
	struct name_cache_entry *nce = NULL;

	/*
	 * First check if we already did a call to this function with the same
	 * ino/gen. If yes, check if the cache entry is still up-to-date. If
	 * yes, return the cached result.
	 */
	nce = name_cache_search(sctx, ino, gen);
	if (nce) {
		if (ino < sctx->send_progress && nce->need_later_update) {
			name_cache_delete(sctx, nce);
			kfree(nce);
			nce = NULL;
		} else {
			name_cache_used(sctx, nce);
			*parent_ino = nce->parent_ino;
			*parent_gen = nce->parent_gen;
			ret = fs_path_add(dest, nce->name, nce->name_len);
			if (ret < 0)
				goto out;
			ret = nce->ret;
			goto out;
		}
	}

	/*
	 * If the inode does not exist yet, add the orphan name and return 1.
	 * This should only happen for the parent dir that we determine in
	 * __record_new_ref.
	 */
	ret = is_inode_existent(sctx, ino, gen);
	if (ret < 0)
		goto out;

	if (!ret) {
		ret = gen_unique_name(sctx, ino, gen, dest);
		if (ret < 0)
			goto out;
		ret = 1;
		goto out_cache;
	}

	/*
	 * Depending on whether the inode was already processed or not, use
	 * send_root or parent_root for ref lookup.
	 */
	if (ino < sctx->send_progress)
		ret = get_first_ref(sctx->send_root, ino,
				    parent_ino, parent_gen, dest);
	else
		ret = get_first_ref(sctx->parent_root, ino,
				    parent_ino, parent_gen, dest);
	if (ret < 0)
		goto out;

	/*
	 * Check if the ref was overwritten by an inode's ref that was
	 * processed earlier. If yes, treat as orphan and return 1.
	 */
	ret = did_overwrite_ref(sctx, *parent_ino, *parent_gen, ino, gen,
			dest->start, dest->end - dest->start);
	if (ret < 0)
		goto out;
	if (ret) {
		fs_path_reset(dest);
		ret = gen_unique_name(sctx, ino, gen, dest);
		if (ret < 0)
			goto out;
		ret = 1;
	}

out_cache:
	/*
	 * Store the result of the lookup in the name cache.
	 */
	nce = kmalloc(sizeof(*nce) + fs_path_len(dest) + 1, GFP_KERNEL);
	if (!nce) {
		ret = -ENOMEM;
		goto out;
	}

	nce->ino = ino;
	nce->gen = gen;
	nce->parent_ino = *parent_ino;
	nce->parent_gen = *parent_gen;
	nce->name_len = fs_path_len(dest);
	nce->ret = ret;
	strcpy(nce->name, dest->start);

	if (ino < sctx->send_progress)
		nce->need_later_update = 0;
	else
		nce->need_later_update = 1;

	nce_ret = name_cache_insert(sctx, nce);
	if (nce_ret < 0)
		ret = nce_ret;
	name_cache_clean_unused(sctx);

out:
	return ret;
}

/*
 * Magic happens here. This function returns the first ref to an inode as it
 * would look like while receiving the stream at this point in time.
 * We walk the path up to the root. For every inode in between, we check if it
 * was already processed/sent. If yes, we continue with the parent as found
 * in send_root. If not, we continue with the parent as found in parent_root.
 * If we encounter an inode that was deleted at this point in time, we use the
 * inodes "orphan" name instead of the real name and stop. Same with new inodes
 * that were not created yet and overwritten inodes/refs.
 *
 * When do we have orphan inodes:
 * 1. When an inode is freshly created and thus no valid refs are available yet
 * 2. When a directory lost all its refs (deleted) but still has dir items
 *    inside which were not processed yet (pending for move/delete). If anyone
 *    tried to get the path to the dir items, it would get a path inside that
 *    orphan directory.
 * 3. When an inode is moved around or gets new links, it may overwrite the ref
 *    of an unprocessed inode. If in that case the first ref would be
 *    overwritten, the overwritten inode gets "orphanized". Later when we
 *    process this overwritten inode, it is restored at a new place by moving
 *    the orphan inode.
 *
 * sctx->send_progress tells this function at which point in time receiving
 * would be.
 */
static int get_cur_path(struct send_ctx *sctx, u64 ino, u64 gen,
			struct fs_path *dest)
{
	int ret = 0;
	struct fs_path *name = NULL;
	u64 parent_inode = 0;
	u64 parent_gen = 0;
	int stop = 0;

	name = fs_path_alloc();
	if (!name) {
		ret = -ENOMEM;
		goto out;
	}

	dest->reversed = 1;
	fs_path_reset(dest);

	while (!stop && ino != BTRFS_FIRST_FREE_OBJECTID) {
		struct waiting_dir_move *wdm;

		fs_path_reset(name);

		if (is_waiting_for_rm(sctx, ino)) {
			ret = gen_unique_name(sctx, ino, gen, name);
			if (ret < 0)
				goto out;
			ret = fs_path_add_path(dest, name);
			break;
		}

		wdm = get_waiting_dir_move(sctx, ino);
		if (wdm && wdm->orphanized) {
			ret = gen_unique_name(sctx, ino, gen, name);
			stop = 1;
		} else if (wdm) {
			ret = get_first_ref(sctx->parent_root, ino,
					    &parent_inode, &parent_gen, name);
		} else {
			ret = __get_cur_name_and_parent(sctx, ino, gen,
							&parent_inode,
							&parent_gen, name);
			if (ret)
				stop = 1;
		}

		if (ret < 0)
			goto out;

		ret = fs_path_add_path(dest, name);
		if (ret < 0)
			goto out;

		ino = parent_inode;
		gen = parent_gen;
	}

out:
	fs_path_free(name);
	if (!ret)
		fs_path_unreverse(dest);
	return ret;
}
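
/*
 * Example (sketch): for inode 260 at "a/b/d", the loop prepends "d", then
 * "b", then "a" into the reversed fs_path, stops when it reaches
 * BTRFS_FIRST_FREE_OBJECTID and unreverses the buffer, yielding "a/b/d"
 * relative to the subvolume root.
 */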

/*
 * Sends a BTRFS_SEND_C_SUBVOL command/item to userspace
 */
static int send_subvol_begin(struct send_ctx *sctx)
{
	int ret;
	struct btrfs_root *send_root = sctx->send_root;
	struct btrfs_root *parent_root = sctx->parent_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_root_ref *ref;
	struct extent_buffer *leaf;
	char *name = NULL;
	int namelen;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	name = kmalloc(BTRFS_PATH_NAME_MAX, GFP_KERNEL);
	if (!name) {
		btrfs_free_path(path);
		return -ENOMEM;
	}

	key.objectid = send_root->objectid;
	key.type = BTRFS_ROOT_BACKREF_KEY;
	key.offset = 0;

	ret = btrfs_search_slot_for_read(send_root->fs_info->tree_root,
				&key, path, 1, 0);
	if (ret < 0)
		goto out;
	if (ret) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.type != BTRFS_ROOT_BACKREF_KEY ||
	    key.objectid != send_root->objectid) {
		ret = -ENOENT;
		goto out;
	}
	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
	namelen = btrfs_root_ref_name_len(leaf, ref);
	read_extent_buffer(leaf, name, (unsigned long)(ref + 1), namelen);
	btrfs_release_path(path);

	if (parent_root) {
		ret = begin_cmd(sctx, BTRFS_SEND_C_SNAPSHOT);
		if (ret < 0)
			goto out;
	} else {
		ret = begin_cmd(sctx, BTRFS_SEND_C_SUBVOL);
		if (ret < 0)
			goto out;
	}

	TLV_PUT_STRING(sctx, BTRFS_SEND_A_PATH, name, namelen);

	if (!btrfs_is_empty_uuid(sctx->send_root->root_item.received_uuid))
		TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID,
			    sctx->send_root->root_item.received_uuid);
	else
		TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID,
			    sctx->send_root->root_item.uuid);

	TLV_PUT_U64(sctx, BTRFS_SEND_A_CTRANSID,
		    le64_to_cpu(sctx->send_root->root_item.ctransid));
	if (parent_root) {
		if (!btrfs_is_empty_uuid(parent_root->root_item.received_uuid))
			TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
				     parent_root->root_item.received_uuid);
		else
			TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
				     parent_root->root_item.uuid);
		TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
			    le64_to_cpu(sctx->parent_root->root_item.ctransid));
	}

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	btrfs_free_path(path);
	kfree(name);
	return ret;
}

static int send_truncate(struct send_ctx *sctx, u64 ino, u64 gen, u64 size)
{
	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
	int ret = 0;
	struct fs_path *p;

	btrfs_debug(fs_info, "send_truncate %llu size=%llu", ino, size);

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	ret = begin_cmd(sctx, BTRFS_SEND_C_TRUNCATE);
	if (ret < 0)
		goto out;

	ret = get_cur_path(sctx, ino, gen, p);
	if (ret < 0)
		goto out;
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, size);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	fs_path_free(p);
	return ret;
}

static int send_chmod(struct send_ctx *sctx, u64 ino, u64 gen, u64 mode)
{
	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
	int ret = 0;
	struct fs_path *p;

	btrfs_debug(fs_info, "send_chmod %llu mode=%llu", ino, mode);

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	ret = begin_cmd(sctx, BTRFS_SEND_C_CHMOD);
	if (ret < 0)
		goto out;

	ret = get_cur_path(sctx, ino, gen, p);
	if (ret < 0)
		goto out;
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode & 07777);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	fs_path_free(p);
	return ret;
}

static int send_chown(struct send_ctx *sctx, u64 ino, u64 gen, u64 uid, u64 gid)
{
	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
	int ret = 0;
	struct fs_path *p;

	btrfs_debug(fs_info, "send_chown %llu uid=%llu, gid=%llu",
		    ino, uid, gid);

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	ret = begin_cmd(sctx, BTRFS_SEND_C_CHOWN);
	if (ret < 0)
		goto out;

	ret = get_cur_path(sctx, ino, gen, p);
	if (ret < 0)
		goto out;
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_UID, uid);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_GID, gid);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	fs_path_free(p);
	return ret;
}

static int send_utimes(struct send_ctx *sctx, u64 ino, u64 gen)
{
	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
	int ret = 0;
	struct fs_path *p = NULL;
	struct btrfs_inode_item *ii;
	struct btrfs_path *path = NULL;
	struct extent_buffer *eb;
	struct btrfs_key key;
	int slot;

	btrfs_debug(fs_info, "send_utimes %llu", ino);

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	path = alloc_path_for_send();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	key.objectid = ino;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0);
	if (ret > 0)
		ret = -ENOENT;
	if (ret < 0)
		goto out;

	eb = path->nodes[0];
	slot = path->slots[0];
	ii = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);

	ret = begin_cmd(sctx, BTRFS_SEND_C_UTIMES);
	if (ret < 0)
		goto out;

	ret = get_cur_path(sctx, ino, gen, p);
	if (ret < 0)
		goto out;
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
	TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_ATIME, eb, &ii->atime);
	TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_MTIME, eb, &ii->mtime);
	TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_CTIME, eb, &ii->ctime);
	/* TODO Add otime support when the otime patches get into upstream */

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	fs_path_free(p);
	btrfs_free_path(path);
	return ret;
}

/*
 * Sends a BTRFS_SEND_C_MKXXX or SYMLINK command to user space. We don't have
 * a valid path yet because we did not process the refs yet. So, the inode
 * is created as orphan.
 */
static int send_create_inode(struct send_ctx *sctx, u64 ino)
{
	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
	int ret = 0;
	struct fs_path *p;
	int cmd;
	u64 gen;
	u64 mode;
	u64 rdev;

	btrfs_debug(fs_info, "send_create_inode %llu", ino);

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	if (ino != sctx->cur_ino) {
		ret = get_inode_info(sctx->send_root, ino, NULL, &gen, &mode,
				     NULL, NULL, &rdev);
		if (ret < 0)
			goto out;
	} else {
		gen = sctx->cur_inode_gen;
		mode = sctx->cur_inode_mode;
		rdev = sctx->cur_inode_rdev;
	}

	if (S_ISREG(mode)) {
		cmd = BTRFS_SEND_C_MKFILE;
	} else if (S_ISDIR(mode)) {
		cmd = BTRFS_SEND_C_MKDIR;
	} else if (S_ISLNK(mode)) {
		cmd = BTRFS_SEND_C_SYMLINK;
	} else if (S_ISCHR(mode) || S_ISBLK(mode)) {
		cmd = BTRFS_SEND_C_MKNOD;
	} else if (S_ISFIFO(mode)) {
		cmd = BTRFS_SEND_C_MKFIFO;
	} else if (S_ISSOCK(mode)) {
		cmd = BTRFS_SEND_C_MKSOCK;
	} else {
		btrfs_warn(sctx->send_root->fs_info, "unexpected inode type %o",
				(int)(mode & S_IFMT));
		ret = -EOPNOTSUPP;
		goto out;
	}

	ret = begin_cmd(sctx, cmd);
	if (ret < 0)
		goto out;

	ret = gen_unique_name(sctx, ino, gen, p);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_INO, ino);

	if (S_ISLNK(mode)) {
		fs_path_reset(p);
		ret = read_symlink(sctx->send_root, ino, p);
		if (ret < 0)
			goto out;
		TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, p);
	} else if (S_ISCHR(mode) || S_ISBLK(mode) ||
		   S_ISFIFO(mode) || S_ISSOCK(mode)) {
		TLV_PUT_U64(sctx, BTRFS_SEND_A_RDEV, new_encode_dev(rdev));
		TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode);
	}

	ret = send_cmd(sctx);
	if (ret < 0)
		goto out;

tlv_put_failure:
out:
	fs_path_free(p);
	return ret;
}

/*
 * We need some special handling for inodes that get processed before the
 * parent directory got created. See process_recorded_refs for details.
 * This function does the check if we already created the dir out of order.
 */
static int did_create_dir(struct send_ctx *sctx, u64 dir)
{
	int ret = 0;
	struct btrfs_path *path = NULL;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_key di_key;
	struct extent_buffer *eb;
	struct btrfs_dir_item *di;
	int slot;

	path = alloc_path_for_send();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	key.objectid = dir;
	key.type = BTRFS_DIR_INDEX_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		eb = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(eb)) {
			ret = btrfs_next_leaf(sctx->send_root, path);
			if (ret < 0) {
				goto out;
			} else if (ret > 0) {
				ret = 0;
				break;
			}
			continue;
		}

		btrfs_item_key_to_cpu(eb, &found_key, slot);
		if (found_key.objectid != key.objectid ||
		    found_key.type != key.type) {
			ret = 0;
			goto out;
		}

		di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
		btrfs_dir_item_key_to_cpu(eb, di, &di_key);

		if (di_key.type != BTRFS_ROOT_ITEM_KEY &&
		    di_key.objectid < sctx->send_progress) {
			ret = 1;
			goto out;
		}

		path->slots[0]++;
	}

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Only creates the inode if it is:
 * 1. Not a directory
 * 2. Or a directory which was not created already due to out of order
 *    directories. See did_create_dir and process_recorded_refs for details.
 */
static int send_create_inode_if_needed(struct send_ctx *sctx)
{
	int ret;

	if (S_ISDIR(sctx->cur_inode_mode)) {
		ret = did_create_dir(sctx, sctx->cur_ino);
		if (ret < 0)
			goto out;
		if (ret) {
			ret = 0;
			goto out;
		}
	}

	ret = send_create_inode(sctx, sctx->cur_ino);
	if (ret < 0)
		goto out;

out:
	return ret;
}

struct recorded_ref {
	struct list_head list;
	char *name;
	struct fs_path *full_path;
	u64 dir;
	u64 dir_gen;
	int name_len;
};

static void set_ref_path(struct recorded_ref *ref, struct fs_path *path)
{
	ref->full_path = path;
	ref->name = (char *)kbasename(ref->full_path->start);
	ref->name_len = ref->full_path->end - ref->name;
}

/*
 * We need to process new refs before deleted refs, but compare_tree gives us
 * everything mixed. So we first record all refs and later process them.
 * This function is a helper to record one ref.
 */
static int __record_ref(struct list_head *head, u64 dir,
		      u64 dir_gen, struct fs_path *path)
{
	struct recorded_ref *ref;

	ref = kmalloc(sizeof(*ref), GFP_KERNEL);
	if (!ref)
		return -ENOMEM;

	ref->dir = dir;
	ref->dir_gen = dir_gen;
	set_ref_path(ref, path);
	list_add_tail(&ref->list, head);
	return 0;
}

static int dup_ref(struct recorded_ref *ref, struct list_head *list)
{
	struct recorded_ref *new;

	new = kmalloc(sizeof(*ref), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	new->dir = ref->dir;
	new->dir_gen = ref->dir_gen;
	new->full_path = NULL;
	INIT_LIST_HEAD(&new->list);
	list_add_tail(&new->list, list);
	return 0;
}

static void __free_recorded_refs(struct list_head *head)
{
	struct recorded_ref *cur;

	while (!list_empty(head)) {
		cur = list_entry(head->next, struct recorded_ref, list);
		fs_path_free(cur->full_path);
		list_del(&cur->list);
		kfree(cur);
	}
}

static void free_recorded_refs(struct send_ctx *sctx)
{
	__free_recorded_refs(&sctx->new_refs);
	__free_recorded_refs(&sctx->deleted_refs);
}
2817 * Renames/moves a file/dir to its orphan name. Used when the first
2818 * ref of an unprocessed inode gets overwritten and for all non empty
2821 static int orphanize_inode(struct send_ctx
*sctx
, u64 ino
, u64 gen
,
2822 struct fs_path
*path
)
2825 struct fs_path
*orphan
;
2827 orphan
= fs_path_alloc();
2831 ret
= gen_unique_name(sctx
, ino
, gen
, orphan
);
2835 ret
= send_rename(sctx
, path
, orphan
);
2838 fs_path_free(orphan
);
static struct orphan_dir_info *
add_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)
{
	struct rb_node **p = &sctx->orphan_dirs.rb_node;
	struct rb_node *parent = NULL;
	struct orphan_dir_info *entry, *odi;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct orphan_dir_info, node);
		if (dir_ino < entry->ino) {
			p = &(*p)->rb_left;
		} else if (dir_ino > entry->ino) {
			p = &(*p)->rb_right;
		} else {
			return entry;
		}
	}

	odi = kmalloc(sizeof(*odi), GFP_KERNEL);
	if (!odi)
		return ERR_PTR(-ENOMEM);
	odi->ino = dir_ino;
	odi->gen = 0;
	odi->last_dir_index_offset = 0;

	rb_link_node(&odi->node, parent, p);
	rb_insert_color(&odi->node, &sctx->orphan_dirs);
	return odi;
}

static struct orphan_dir_info *
get_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)
{
	struct rb_node *n = sctx->orphan_dirs.rb_node;
	struct orphan_dir_info *entry;

	while (n) {
		entry = rb_entry(n, struct orphan_dir_info, node);
		if (dir_ino < entry->ino)
			n = n->rb_left;
		else if (dir_ino > entry->ino)
			n = n->rb_right;
		else
			return entry;
	}
	return NULL;
}

static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino)
{
	struct orphan_dir_info *odi = get_orphan_dir_info(sctx, dir_ino);

	return odi != NULL;
}

static void free_orphan_dir_info(struct send_ctx *sctx,
				 struct orphan_dir_info *odi)
{
	if (!odi)
		return;
	rb_erase(&odi->node, &sctx->orphan_dirs);
	kfree(odi);
}

/*
 * Returns 1 if a directory can be removed at this point in time.
 * We check this by iterating all dir items and checking if the inode behind
 * the dir item was already processed.
 */
static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen,
		     u64 send_progress)
{
	int ret = 0;
	struct btrfs_root *root = sctx->parent_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_key loc;
	struct btrfs_dir_item *di;
	struct orphan_dir_info *odi = NULL;

	/*
	 * Don't try to rmdir the top/root subvolume dir.
	 */
	if (dir == BTRFS_FIRST_FREE_OBJECTID)
		return 0;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = dir;
	key.type = BTRFS_DIR_INDEX_KEY;
	key.offset = 0;

	odi = get_orphan_dir_info(sctx, dir);
	if (odi)
		key.offset = odi->last_dir_index_offset;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		struct waiting_dir_move *dm;

		if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			else if (ret > 0)
				break;
			continue;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != key.objectid ||
		    found_key.type != key.type)
			break;

		di = btrfs_item_ptr(path->nodes[0], path->slots[0],
				    struct btrfs_dir_item);
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &loc);

		dm = get_waiting_dir_move(sctx, loc.objectid);
		if (dm) {
			odi = add_orphan_dir_info(sctx, dir);
			if (IS_ERR(odi)) {
				ret = PTR_ERR(odi);
				goto out;
			}
			odi->gen = dir_gen;
			odi->last_dir_index_offset = found_key.offset;
			dm->rmdir_ino = dir;
			ret = 0;
			goto out;
		}

		if (loc.objectid > send_progress) {
			odi = add_orphan_dir_info(sctx, dir);
			if (IS_ERR(odi)) {
				ret = PTR_ERR(odi);
				goto out;
			}
			odi->gen = dir_gen;
			odi->last_dir_index_offset = found_key.offset;
			ret = 0;
			goto out;
		}

		path->slots[0]++;
	}
	free_orphan_dir_info(sctx, odi);

	ret = 1;

out:
	btrfs_free_path(path);
	return ret;
}

static int is_waiting_for_move(struct send_ctx *sctx, u64 ino)
{
	struct waiting_dir_move *entry = get_waiting_dir_move(sctx, ino);

	return entry != NULL;
}

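/*
 * A waiting_dir_move entry marks a directory inode whose rename/move had to
 * be delayed. Entries live in the sctx->waiting_dir_moves red-black tree,
 * indexed by inode number. A non-zero rmdir_ino names a directory that can
 * only be removed after this delayed move is applied.
 */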
static int add_waiting_dir_move(struct send_ctx *sctx, u64 ino, bool orphanized)
{
	struct rb_node **p = &sctx->waiting_dir_moves.rb_node;
	struct rb_node *parent = NULL;
	struct waiting_dir_move *entry, *dm;

	dm = kmalloc(sizeof(*dm), GFP_KERNEL);
	if (!dm)
		return -ENOMEM;
	dm->ino = ino;
	dm->rmdir_ino = 0;
	dm->orphanized = orphanized;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct waiting_dir_move, node);
		if (ino < entry->ino) {
			p = &(*p)->rb_left;
		} else if (ino > entry->ino) {
			p = &(*p)->rb_right;
		} else {
			kfree(dm);
			return -EEXIST;
		}
	}

	rb_link_node(&dm->node, parent, p);
	rb_insert_color(&dm->node, &sctx->waiting_dir_moves);
	return 0;
}

static struct waiting_dir_move *
get_waiting_dir_move(struct send_ctx *sctx, u64 ino)
{
	struct rb_node *n = sctx->waiting_dir_moves.rb_node;
	struct waiting_dir_move *entry;

	while (n) {
		entry = rb_entry(n, struct waiting_dir_move, node);
		if (ino < entry->ino)
			n = n->rb_left;
		else if (ino > entry->ino)
			n = n->rb_right;
		else
			return entry;
	}
	return NULL;
}

static void free_waiting_dir_move(struct send_ctx *sctx,
				  struct waiting_dir_move *dm)
{
	if (!dm)
		return;
	rb_erase(&dm->node, &sctx->waiting_dir_moves);
	kfree(dm);
}

static int add_pending_dir_move(struct send_ctx *sctx,
				u64 ino,
				u64 ino_gen,
				u64 parent_ino,
				struct list_head *new_refs,
				struct list_head *deleted_refs,
				const bool is_orphan)
{
	struct rb_node **p = &sctx->pending_dir_moves.rb_node;
	struct rb_node *parent = NULL;
	struct pending_dir_move *entry = NULL, *pm;
	struct recorded_ref *cur;
	int exists = 0;
	int ret;

	pm = kmalloc(sizeof(*pm), GFP_KERNEL);
	if (!pm)
		return -ENOMEM;
	pm->parent_ino = parent_ino;
	pm->ino = ino;
	pm->gen = ino_gen;
	INIT_LIST_HEAD(&pm->list);
	INIT_LIST_HEAD(&pm->update_refs);
	RB_CLEAR_NODE(&pm->node);

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct pending_dir_move, node);
		if (parent_ino < entry->parent_ino) {
			p = &(*p)->rb_left;
		} else if (parent_ino > entry->parent_ino) {
			p = &(*p)->rb_right;
		} else {
			exists = 1;
			break;
		}
	}

	list_for_each_entry(cur, deleted_refs, list) {
		ret = dup_ref(cur, &pm->update_refs);
		if (ret < 0)
			goto out;
	}
	list_for_each_entry(cur, new_refs, list) {
		ret = dup_ref(cur, &pm->update_refs);
		if (ret < 0)
			goto out;
	}

	ret = add_waiting_dir_move(sctx, pm->ino, is_orphan);
	if (ret)
		goto out;

	if (exists) {
		list_add_tail(&pm->list, &entry->list);
	} else {
		rb_link_node(&pm->node, parent, p);
		rb_insert_color(&pm->node, &sctx->pending_dir_moves);
	}
	ret = 0;
out:
	if (ret) {
		__free_recorded_refs(&pm->update_refs);
		kfree(pm);
	}
	return ret;
}

static struct pending_dir_move *get_pending_dir_moves(struct send_ctx *sctx,
						      u64 parent_ino)
{
	struct rb_node *n = sctx->pending_dir_moves.rb_node;
	struct pending_dir_move *entry;

	while (n) {
		entry = rb_entry(n, struct pending_dir_move, node);
		if (parent_ino < entry->parent_ino)
			n = n->rb_left;
		else if (parent_ino > entry->parent_ino)
			n = n->rb_right;
		else
			return entry;
	}
	return NULL;
}

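/*
 * Walk up the ancestor chain of the given inode, building its current name at
 * each step, to detect whether applying a pending move would create a path
 * loop, i.e. whether some ancestor leads back to the inode we started from.
 * Returns 1 (and sets *ancestor_ino) if a loop was found, 0 if not and < 0 on
 * error.
 */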
static int path_loop(struct send_ctx *sctx, struct fs_path *name,
		     u64 ino, u64 gen, u64 *ancestor_ino)
{
	int ret = 0;
	u64 parent_inode = 0;
	u64 parent_gen = 0;
	u64 start_ino = ino;

	*ancestor_ino = 0;
	while (ino != BTRFS_FIRST_FREE_OBJECTID) {
		fs_path_reset(name);

		if (is_waiting_for_rm(sctx, ino))
			break;
		if (is_waiting_for_move(sctx, ino)) {
			if (*ancestor_ino == 0)
				*ancestor_ino = ino;
			ret = get_first_ref(sctx->parent_root, ino,
					    &parent_inode, &parent_gen, name);
		} else {
			ret = __get_cur_name_and_parent(sctx, ino, gen,
							&parent_inode,
							&parent_gen, name);
			if (ret > 0) {
				ret = 0;
				break;
			}
		}
		if (ret < 0)
			break;
		if (parent_inode == start_ino) {
			ret = 1;
			if (*ancestor_ino == 0)
				*ancestor_ino = ino;
			break;
		}
		ino = parent_inode;
		gen = parent_gen;
	}
	return ret;
}

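/*
 * Perform a previously delayed rename/move of directory pm->ino: build the
 * source path (the orphan name if it was orphanized, otherwise its path in
 * the parent snapshot), build the destination path, issue the rename and, if
 * an rmdir was blocked on this move, remove that directory too. Finally
 * update the utimes of the old and new parent directories.
 */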
static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
{
	struct fs_path *from_path = NULL;
	struct fs_path *to_path = NULL;
	struct fs_path *name = NULL;
	u64 orig_progress = sctx->send_progress;
	struct recorded_ref *cur;
	u64 parent_ino, parent_gen;
	struct waiting_dir_move *dm = NULL;
	u64 rmdir_ino = 0;
	u64 ancestor;
	bool is_orphan;
	int ret;

	name = fs_path_alloc();
	from_path = fs_path_alloc();
	if (!name || !from_path) {
		ret = -ENOMEM;
		goto out;
	}

	dm = get_waiting_dir_move(sctx, pm->ino);
	ASSERT(dm);
	rmdir_ino = dm->rmdir_ino;
	is_orphan = dm->orphanized;
	free_waiting_dir_move(sctx, dm);

	if (is_orphan) {
		ret = gen_unique_name(sctx, pm->ino,
				      pm->gen, from_path);
	} else {
		ret = get_first_ref(sctx->parent_root, pm->ino,
				    &parent_ino, &parent_gen, name);
		if (ret < 0)
			goto out;
		ret = get_cur_path(sctx, parent_ino, parent_gen,
				   from_path);
		if (ret < 0)
			goto out;
		ret = fs_path_add_path(from_path, name);
	}
	if (ret < 0)
		goto out;

	sctx->send_progress = sctx->cur_ino + 1;
	ret = path_loop(sctx, name, pm->ino, pm->gen, &ancestor);
	if (ret < 0)
		goto out;
	if (ret) {
		LIST_HEAD(deleted_refs);
		ASSERT(ancestor > BTRFS_FIRST_FREE_OBJECTID);
		ret = add_pending_dir_move(sctx, pm->ino, pm->gen, ancestor,
					   &pm->update_refs, &deleted_refs,
					   is_orphan);
		if (ret < 0)
			goto out;
		if (rmdir_ino) {
			dm = get_waiting_dir_move(sctx, pm->ino);
			ASSERT(dm);
			dm->rmdir_ino = rmdir_ino;
		}
		goto out;
	}
	fs_path_reset(name);
	to_path = name;
	name = NULL;
	ret = get_cur_path(sctx, pm->ino, pm->gen, to_path);
	if (ret < 0)
		goto out;

	ret = send_rename(sctx, from_path, to_path);
	if (ret < 0)
		goto out;

	if (rmdir_ino) {
		struct orphan_dir_info *odi;
		u64 gen;

		odi = get_orphan_dir_info(sctx, rmdir_ino);
		if (!odi) {
			/* already deleted */
			goto finish;
		}
		gen = odi->gen;

		ret = can_rmdir(sctx, rmdir_ino, gen, sctx->cur_ino);
		if (ret < 0)
			goto out;
		if (!ret)
			goto finish;

		name = fs_path_alloc();
		if (!name) {
			ret = -ENOMEM;
			goto out;
		}
		ret = get_cur_path(sctx, rmdir_ino, gen, name);
		if (ret < 0)
			goto out;
		ret = send_rmdir(sctx, name);
		if (ret < 0)
			goto out;
	}

finish:
	ret = send_utimes(sctx, pm->ino, pm->gen);
	if (ret < 0)
		goto out;

	/*
	 * After rename/move, need to update the utimes of both new parent(s)
	 * and old parent(s).
	 */
	list_for_each_entry(cur, &pm->update_refs, list) {
		/*
		 * The parent inode might have been deleted in the send snapshot
		 */
		ret = get_inode_info(sctx->send_root, cur->dir, NULL,
				     NULL, NULL, NULL, NULL, NULL);
		if (ret == -ENOENT) {
			ret = 0;
			continue;
		}
		if (ret < 0)
			goto out;

		ret = send_utimes(sctx, cur->dir, cur->dir_gen);
		if (ret < 0)
			goto out;
	}

out:
	fs_path_free(name);
	fs_path_free(from_path);
	fs_path_free(to_path);
	sctx->send_progress = orig_progress;

	return ret;
}

static void free_pending_move(struct send_ctx *sctx, struct pending_dir_move *m)
{
	if (!list_empty(&m->list))
		list_del(&m->list);
	if (!RB_EMPTY_NODE(&m->node))
		rb_erase(&m->node, &sctx->pending_dir_moves);
	__free_recorded_refs(&m->update_refs);
	kfree(m);
}

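/*
 * Append a pending move, and any moves that were queued on its list, to the
 * tail of the processing stack while keeping their relative order.
 */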
static void tail_append_pending_moves(struct pending_dir_move *moves,
				      struct list_head *stack)
{
	if (list_empty(&moves->list)) {
		list_add_tail(&moves->list, stack);
	} else {
		LIST_HEAD(list);
		list_splice_init(&moves->list, &list);
		list_add_tail(&moves->list, stack);
		list_splice_tail(&list, stack);
	}
}

static int apply_children_dir_moves(struct send_ctx *sctx)
{
	struct pending_dir_move *pm;
	struct list_head stack;
	u64 parent_ino = sctx->cur_ino;
	int ret = 0;

	pm = get_pending_dir_moves(sctx, parent_ino);
	if (!pm)
		return 0;

	INIT_LIST_HEAD(&stack);
	tail_append_pending_moves(pm, &stack);

	while (!list_empty(&stack)) {
		pm = list_first_entry(&stack, struct pending_dir_move, list);
		parent_ino = pm->ino;
		ret = apply_dir_move(sctx, pm);
		free_pending_move(sctx, pm);
		if (ret)
			goto out;
		pm = get_pending_dir_moves(sctx, parent_ino);
		if (pm)
			tail_append_pending_moves(pm, &stack);
	}
	return 0;

out:
	while (!list_empty(&stack)) {
		pm = list_first_entry(&stack, struct pending_dir_move, list);
		free_pending_move(sctx, pm);
	}
	return ret;
}

/*
 * We might need to delay a directory rename even when no ancestor directory
 * (in the send root) with a higher inode number than ours (sctx->cur_ino) was
 * renamed. This happens when we rename a directory to the old name (the name
 * in the parent root) of some other unrelated directory that got its rename
 * delayed due to some ancestor with higher number that got renamed.
 *
 * Example:
 *
 * Parent snapshot:
 * .                                       (ino 256)
 * |---- a/                                (ino 257)
 * |     |---- file                        (ino 260)
 * |
 * |---- b/                                (ino 258)
 * |---- c/                                (ino 259)
 *
 * Send snapshot:
 * .                                       (ino 256)
 * |---- a/                                (ino 258)
 * |---- x/                                (ino 259)
 *       |---- y/                          (ino 257)
 *             |----- file                 (ino 260)
 *
 * Here we can not rename 258 from 'b' to 'a' without the rename of inode 257
 * from 'a' to 'x/y' happening first, which in turn depends on the rename of
 * inode 259 from 'c' to 'x'. So the order of rename commands the send stream
 * must issue is:
 *
 * 1 - rename 259 from 'c' to 'x'
 * 2 - rename 257 from 'a' to 'x/y'
 * 3 - rename 258 from 'b' to 'a'
 *
 * Returns 1 if the rename of sctx->cur_ino needs to be delayed, 0 if it can
 * be done right away and < 0 on error.
 */
static int wait_for_dest_dir_move(struct send_ctx *sctx,
				  struct recorded_ref *parent_ref,
				  const bool is_orphan)
{
	struct btrfs_fs_info *fs_info = sctx->parent_root->fs_info;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key di_key;
	struct btrfs_dir_item *di;
	u64 left_gen;
	u64 right_gen;
	int ret = 0;
	struct waiting_dir_move *wdm;

	if (RB_EMPTY_ROOT(&sctx->waiting_dir_moves))
		return 0;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = parent_ref->dir;
	key.type = BTRFS_DIR_ITEM_KEY;
	key.offset = btrfs_name_hash(parent_ref->name, parent_ref->name_len);

	ret = btrfs_search_slot(NULL, sctx->parent_root, &key, path, 0, 0);
	if (ret < 0) {
		goto out;
	} else if (ret > 0) {
		ret = 0;
		goto out;
	}

	di = btrfs_match_dir_item_name(fs_info, path, parent_ref->name,
				       parent_ref->name_len);
	if (!di) {
		ret = 0;
		goto out;
	}
	/*
	 * di_key.objectid has the number of the inode that has a dentry in the
	 * parent directory with the same name that sctx->cur_ino is being
	 * renamed to. We need to check if that inode is in the send root as
	 * well and if it is currently marked as an inode with a pending rename,
	 * if it is, we need to delay the rename of sctx->cur_ino as well, so
	 * that it happens after that other inode is renamed.
	 */
	btrfs_dir_item_key_to_cpu(path->nodes[0], di, &di_key);
	if (di_key.type != BTRFS_INODE_ITEM_KEY) {
		ret = 0;
		goto out;
	}

	ret = get_inode_info(sctx->parent_root, di_key.objectid, NULL,
			     &left_gen, NULL, NULL, NULL, NULL);
	if (ret < 0)
		goto out;
	ret = get_inode_info(sctx->send_root, di_key.objectid, NULL,
			     &right_gen, NULL, NULL, NULL, NULL);
	if (ret < 0) {
		if (ret == -ENOENT)
			ret = 0;
		goto out;
	}

	/* Different inode, no need to delay the rename of sctx->cur_ino */
	if (right_gen != left_gen) {
		ret = 0;
		goto out;
	}

	wdm = get_waiting_dir_move(sctx, di_key.objectid);
	if (wdm && !wdm->orphanized) {
		ret = add_pending_dir_move(sctx,
					   sctx->cur_ino,
					   sctx->cur_inode_gen,
					   di_key.objectid,
					   &sctx->new_refs,
					   &sctx->deleted_refs,
					   is_orphan);
		if (!ret)
			ret = 1;
	}
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Check if inode ino2, or any of its ancestors, is inode ino1.
 * Return 1 if true, 0 if false and < 0 on error.
 */
static int check_ino_in_path(struct btrfs_root *root,
			     const u64 ino1,
			     const u64 ino1_gen,
			     const u64 ino2,
			     const u64 ino2_gen,
			     struct fs_path *fs_path)
{
	u64 ino = ino2;

	if (ino1 == ino2)
		return ino1_gen == ino2_gen;

	while (ino > BTRFS_FIRST_FREE_OBJECTID) {
		u64 parent;
		u64 parent_gen;
		int ret;

		fs_path_reset(fs_path);
		ret = get_first_ref(root, ino, &parent, &parent_gen, fs_path);
		if (ret < 0)
			return ret;
		if (parent == ino1)
			return parent_gen == ino1_gen;
		ino = parent;
	}
	return 0;
}

/*
 * Check if ino ino1 is an ancestor of inode ino2 in the given root for any
 * possible path (in case ino2 is not a directory and has multiple hard links).
 * Return 1 if true, 0 if false and < 0 on error.
 */
static int is_ancestor(struct btrfs_root *root,
		       const u64 ino1,
		       const u64 ino1_gen,
		       const u64 ino2,
		       struct fs_path *fs_path)
{
	bool free_fs_path = false;
	int ret = 0;
	struct btrfs_path *path = NULL;
	struct btrfs_key key;

	if (!fs_path) {
		fs_path = fs_path_alloc();
		if (!fs_path)
			return -ENOMEM;
		free_fs_path = true;
	}

	path = alloc_path_for_send();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	key.objectid = ino2;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (true) {
		struct extent_buffer *leaf = path->nodes[0];
		int slot = path->slots[0];
		u32 cur_offset = 0;
		u32 item_size;

		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			if (ret > 0)
				break;
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid != ino2)
			break;
		if (key.type != BTRFS_INODE_REF_KEY &&
		    key.type != BTRFS_INODE_EXTREF_KEY)
			break;

		item_size = btrfs_item_size_nr(leaf, slot);
		while (cur_offset < item_size) {
			u64 parent;
			u64 parent_gen;

			if (key.type == BTRFS_INODE_EXTREF_KEY) {
				unsigned long ptr;
				struct btrfs_inode_extref *extref;

				ptr = btrfs_item_ptr_offset(leaf, slot);
				extref = (struct btrfs_inode_extref *)
					(ptr + cur_offset);
				parent = btrfs_inode_extref_parent(leaf,
								   extref);
				cur_offset += sizeof(*extref);
				cur_offset += btrfs_inode_extref_name_len(leaf,
								  extref);
			} else {
				parent = key.offset;
				cur_offset = item_size;
			}

			ret = get_inode_info(root, parent, NULL, &parent_gen,
					     NULL, NULL, NULL, NULL);
			if (ret < 0)
				goto out;
			ret = check_ino_in_path(root, ino1, ino1_gen,
						parent, parent_gen, fs_path);
			if (ret)
				goto out;
		}
		path->slots[0]++;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	if (free_fs_path)
		fs_path_free(fs_path);
	return ret;
}

static int wait_for_parent_move(struct send_ctx *sctx,
				struct recorded_ref *parent_ref,
				const bool is_orphan)
{
	int ret = 0;
	u64 ino = parent_ref->dir;
	u64 ino_gen = parent_ref->dir_gen;
	u64 parent_ino_before, parent_ino_after;
	struct fs_path *path_before = NULL;
	struct fs_path *path_after = NULL;
	int len1, len2;

	path_after = fs_path_alloc();
	path_before = fs_path_alloc();
	if (!path_after || !path_before) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Our current directory inode may not yet be renamed/moved because some
	 * ancestor (immediate or not) has to be renamed/moved first. So find if
	 * such ancestor exists and make sure our own rename/move happens after
	 * that ancestor is processed to avoid path build infinite loops (done
	 * at get_cur_path()).
	 */
	while (ino > BTRFS_FIRST_FREE_OBJECTID) {
		u64 parent_ino_after_gen;

		if (is_waiting_for_move(sctx, ino)) {
			/*
			 * If the current inode is an ancestor of ino in the
			 * parent root, we need to delay the rename of the
			 * current inode, otherwise don't delay the rename
			 * because we can end up with a circular dependency
			 * of renames, resulting in some directories never
			 * getting the respective rename operations issued in
			 * the send stream or getting into infinite path build
			 * loops.
			 */
			ret = is_ancestor(sctx->parent_root,
					  sctx->cur_ino, sctx->cur_inode_gen,
					  ino, path_before);
			if (ret)
				break;
		}

		fs_path_reset(path_before);
		fs_path_reset(path_after);

		ret = get_first_ref(sctx->send_root, ino, &parent_ino_after,
				    &parent_ino_after_gen, path_after);
		if (ret < 0)
			goto out;
		ret = get_first_ref(sctx->parent_root, ino, &parent_ino_before,
				    NULL, path_before);
		if (ret < 0 && ret != -ENOENT) {
			goto out;
		} else if (ret == -ENOENT) {
			ret = 0;
			break;
		}

		len1 = fs_path_len(path_before);
		len2 = fs_path_len(path_after);
		if (ino > sctx->cur_ino &&
		    (parent_ino_before != parent_ino_after || len1 != len2 ||
		     memcmp(path_before->start, path_after->start, len1))) {
			u64 parent_ino_gen;

			ret = get_inode_info(sctx->parent_root, ino, NULL,
					     &parent_ino_gen, NULL, NULL, NULL,
					     NULL);
			if (ret < 0)
				goto out;
			if (ino_gen == parent_ino_gen) {
				ret = 1;
				break;
			}
		}
		ino = parent_ino_after;
		ino_gen = parent_ino_after_gen;
	}

out:
	fs_path_free(path_before);
	fs_path_free(path_after);

	if (ret == 1) {
		ret = add_pending_dir_move(sctx,
					   sctx->cur_ino,
					   sctx->cur_inode_gen,
					   ino,
					   &sctx->new_refs,
					   &sctx->deleted_refs,
					   is_orphan);
		if (!ret)
			ret = 1;
	}

	return ret;
}

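/*
 * Rebuild a recorded reference's full path after ancestor directories may
 * have been renamed or orphanized since the path was first computed.
 */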
static int update_ref_path(struct send_ctx *sctx, struct recorded_ref *ref)
{
	int ret;
	struct fs_path *new_path;

	/*
	 * Our reference's name member points to its full_path member string, so
	 * we use here a new path.
	 */
	new_path = fs_path_alloc();
	if (!new_path)
		return -ENOMEM;

	ret = get_cur_path(sctx, ref->dir, ref->dir_gen, new_path);
	if (ret < 0) {
		fs_path_free(new_path);
		return ret;
	}
	ret = fs_path_add(new_path, ref->name, ref->name_len);
	if (ret < 0) {
		fs_path_free(new_path);
		return ret;
	}

	fs_path_free(ref->full_path);
	set_ref_path(ref, new_path);

	return 0;
}

/*
 * This does all the move/link/unlink/rmdir magic.
 */
static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
{
	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
	int ret = 0;
	struct recorded_ref *cur;
	struct recorded_ref *cur2;
	struct list_head check_dirs;
	struct fs_path *valid_path = NULL;
	u64 ow_inode = 0;
	u64 ow_gen;
	u64 ow_mode;
	int did_overwrite = 0;
	int is_orphan = 0;
	u64 last_dir_ino_rm = 0;
	bool can_rename = true;
	bool orphanized_dir = false;
	bool orphanized_ancestor = false;

	btrfs_debug(fs_info, "process_recorded_refs %llu", sctx->cur_ino);

	/*
	 * This should never happen as the root dir always has the same ref
	 * which is always '..'
	 */
	BUG_ON(sctx->cur_ino <= BTRFS_FIRST_FREE_OBJECTID);
	INIT_LIST_HEAD(&check_dirs);

	valid_path = fs_path_alloc();
	if (!valid_path) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * First, check if the first ref of the current inode was overwritten
	 * before. If yes, we know that the current inode was already orphanized
	 * and thus use the orphan name. If not, we can use get_cur_path to
	 * get the path of the first ref as it would look like while receiving
	 * at this point in time.
	 * New inodes are always orphan at the beginning, so force to use the
	 * orphan name in this case.
	 * The first ref is stored in valid_path and will be updated if it
	 * gets moved around.
	 */
	if (!sctx->cur_inode_new) {
		ret = did_overwrite_first_ref(sctx, sctx->cur_ino,
				sctx->cur_inode_gen);
		if (ret < 0)
			goto out;
		if (ret)
			did_overwrite = 1;
	}
	if (sctx->cur_inode_new || did_overwrite) {
		ret = gen_unique_name(sctx, sctx->cur_ino,
				sctx->cur_inode_gen, valid_path);
		if (ret < 0)
			goto out;
		is_orphan = 1;
	} else {
		ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen,
				valid_path);
		if (ret < 0)
			goto out;
	}

	list_for_each_entry(cur, &sctx->new_refs, list) {
		/*
		 * We may have refs where the parent directory does not exist
		 * yet. This happens if the parent directory's inum is higher
		 * than the current inum. To handle this case, we create the
		 * parent directory out of order. But we need to check if this
		 * did already happen before due to other refs in the same dir.
		 */
		ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
		if (ret < 0)
			goto out;
		if (ret == inode_state_will_create) {
			ret = 0;
			/*
			 * First check if any of the current inodes refs did
			 * already create the dir.
			 */
			list_for_each_entry(cur2, &sctx->new_refs, list) {
				if (cur == cur2)
					break;
				if (cur2->dir == cur->dir) {
					ret = 1;
					break;
				}
			}

			/*
			 * If that did not happen, check if a previous inode
			 * did already create the dir.
			 */
			if (!ret)
				ret = did_create_dir(sctx, cur->dir);
			if (ret < 0)
				goto out;
			if (!ret) {
				ret = send_create_inode(sctx, cur->dir);
				if (ret < 0)
					goto out;
			}
		}

		/*
		 * Check if this new ref would overwrite the first ref of
		 * another unprocessed inode. If yes, orphanize the
		 * overwritten inode. If we find an overwritten ref that is
		 * not the first ref, simply unlink it.
		 */
		ret = will_overwrite_ref(sctx, cur->dir, cur->dir_gen,
				cur->name, cur->name_len,
				&ow_inode, &ow_gen, &ow_mode);
		if (ret < 0)
			goto out;
		if (ret) {
			ret = is_first_ref(sctx->parent_root,
					   ow_inode, cur->dir, cur->name,
					   cur->name_len);
			if (ret < 0)
				goto out;
			if (ret) {
				struct name_cache_entry *nce;
				struct waiting_dir_move *wdm;

				ret = orphanize_inode(sctx, ow_inode, ow_gen,
						cur->full_path);
				if (ret < 0)
					goto out;
				if (S_ISDIR(ow_mode))
					orphanized_dir = true;

				/*
				 * If ow_inode has its rename operation delayed
				 * make sure that its orphanized name is used in
				 * the source path when performing its rename
				 * operation.
				 */
				if (is_waiting_for_move(sctx, ow_inode)) {
					wdm = get_waiting_dir_move(sctx,
								   ow_inode);
					ASSERT(wdm);
					wdm->orphanized = true;
				}

				/*
				 * Make sure we clear our orphanized inode's
				 * name from the name cache. This is because the
				 * inode ow_inode might be an ancestor of some
				 * other inode that will be orphanized as well
				 * later and has an inode number greater than
				 * sctx->send_progress. We need to prevent
				 * future name lookups from using the old name
				 * and get instead the orphan name.
				 */
				nce = name_cache_search(sctx, ow_inode, ow_gen);
				if (nce) {
					name_cache_delete(sctx, nce);
					kfree(nce);
				}

				/*
				 * ow_inode might currently be an ancestor of
				 * cur_ino, therefore compute valid_path (the
				 * current path of cur_ino) again because it
				 * might contain the pre-orphanization name of
				 * ow_inode, which is no longer valid.
				 */
				ret = is_ancestor(sctx->parent_root,
						  ow_inode, ow_gen,
						  sctx->cur_ino, NULL);
				if (ret > 0) {
					orphanized_ancestor = true;
					fs_path_reset(valid_path);
					ret = get_cur_path(sctx, sctx->cur_ino,
							   sctx->cur_inode_gen,
							   valid_path);
				}
				if (ret < 0)
					goto out;
			} else {
				ret = send_unlink(sctx, cur->full_path);
				if (ret < 0)
					goto out;
			}
		}

		if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root) {
			ret = wait_for_dest_dir_move(sctx, cur, is_orphan);
			if (ret < 0)
				goto out;
			if (ret == 1) {
				can_rename = false;
				*pending_move = 1;
			}
		}

		if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root &&
		    can_rename) {
			ret = wait_for_parent_move(sctx, cur, is_orphan);
			if (ret < 0)
				goto out;
			if (ret == 1) {
				can_rename = false;
				*pending_move = 1;
			}
		}

		/*
		 * link/move the ref to the new place. If we have an orphan
		 * inode, move it and update valid_path. If not, link or move
		 * it depending on the inode mode.
		 */
		if (is_orphan && can_rename) {
			ret = send_rename(sctx, valid_path, cur->full_path);
			if (ret < 0)
				goto out;
			is_orphan = 0;
			ret = fs_path_copy(valid_path, cur->full_path);
			if (ret < 0)
				goto out;
		} else if (can_rename) {
			if (S_ISDIR(sctx->cur_inode_mode)) {
				/*
				 * Dirs can't be linked, so move it. For moved
				 * dirs, we always have one new and one deleted
				 * ref. The deleted ref is ignored later.
				 */
				ret = send_rename(sctx, valid_path,
						  cur->full_path);
				if (!ret)
					ret = fs_path_copy(valid_path,
							   cur->full_path);
				if (ret < 0)
					goto out;
			} else {
				/*
				 * We might have previously orphanized an inode
				 * which is an ancestor of our current inode,
				 * so our reference's full path, which was
				 * computed before any such orphanizations, must
				 * be updated.
				 */
				if (orphanized_dir) {
					ret = update_ref_path(sctx, cur);
					if (ret < 0)
						goto out;
				}
				ret = send_link(sctx, cur->full_path,
						valid_path);
				if (ret < 0)
					goto out;
			}
		}
		ret = dup_ref(cur, &check_dirs);
		if (ret < 0)
			goto out;
	}

	if (S_ISDIR(sctx->cur_inode_mode) && sctx->cur_inode_deleted) {
		/*
		 * Check if we can already rmdir the directory. If not,
		 * orphanize it. For every dir item inside that gets deleted
		 * later, we do this check again and rmdir it then if possible.
		 * See the use of check_dirs for more details.
		 */
		ret = can_rmdir(sctx, sctx->cur_ino, sctx->cur_inode_gen,
				sctx->cur_ino);
		if (ret < 0)
			goto out;
		if (ret) {
			ret = send_rmdir(sctx, valid_path);
			if (ret < 0)
				goto out;
		} else if (!is_orphan) {
			ret = orphanize_inode(sctx, sctx->cur_ino,
					sctx->cur_inode_gen, valid_path);
			if (ret < 0)
				goto out;
			is_orphan = 1;
		}

		list_for_each_entry(cur, &sctx->deleted_refs, list) {
			ret = dup_ref(cur, &check_dirs);
			if (ret < 0)
				goto out;
		}
	} else if (S_ISDIR(sctx->cur_inode_mode) &&
		   !list_empty(&sctx->deleted_refs)) {
		/*
		 * We have a moved dir. Add the old parent to check_dirs
		 */
		cur = list_entry(sctx->deleted_refs.next, struct recorded_ref,
				list);
		ret = dup_ref(cur, &check_dirs);
		if (ret < 0)
			goto out;
	} else if (!S_ISDIR(sctx->cur_inode_mode)) {
		/*
		 * We have a non dir inode. Go through all deleted refs and
		 * unlink them if they were not already overwritten by other
		 * inodes.
		 */
		list_for_each_entry(cur, &sctx->deleted_refs, list) {
			ret = did_overwrite_ref(sctx, cur->dir, cur->dir_gen,
					sctx->cur_ino, sctx->cur_inode_gen,
					cur->name, cur->name_len);
			if (ret < 0)
				goto out;
			if (!ret) {
				/*
				 * If we orphanized any ancestor before, we need
				 * to recompute the full path for deleted names,
				 * since any such path was computed before we
				 * processed any references and orphanized any
				 * ancestor inode.
				 */
				if (orphanized_ancestor) {
					ret = update_ref_path(sctx, cur);
					if (ret < 0)
						goto out;
				}
				ret = send_unlink(sctx, cur->full_path);
				if (ret < 0)
					goto out;
			}
			ret = dup_ref(cur, &check_dirs);
			if (ret < 0)
				goto out;
		}
		/*
		 * If the inode is still orphan, unlink the orphan. This may
		 * happen when a previous inode did overwrite the first ref
		 * of this inode and no new refs were added for the current
		 * inode. Unlinking does not mean that the inode is deleted in
		 * all cases. There may still be links to this inode in other
		 * places.
		 */
		if (is_orphan) {
			ret = send_unlink(sctx, valid_path);
			if (ret < 0)
				goto out;
		}
	}

	/*
	 * We did collect all parent dirs where cur_inode was once located. We
	 * now go through all these dirs and check if they are pending for
	 * deletion and if it's finally possible to perform the rmdir now.
	 * We also update the inode stats of the parent dirs here.
	 */
	list_for_each_entry(cur, &check_dirs, list) {
		/*
		 * In case we had refs into dirs that were not processed yet,
		 * we don't need to do the utime and rmdir logic for these dirs.
		 * The dir will be processed later.
		 */
		if (cur->dir > sctx->cur_ino)
			continue;

		ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
		if (ret < 0)
			goto out;

		if (ret == inode_state_did_create ||
		    ret == inode_state_no_change) {
			/* TODO delayed utimes */
			ret = send_utimes(sctx, cur->dir, cur->dir_gen);
			if (ret < 0)
				goto out;
		} else if (ret == inode_state_did_delete &&
			   cur->dir != last_dir_ino_rm) {
			ret = can_rmdir(sctx, cur->dir, cur->dir_gen,
					sctx->cur_ino);
			if (ret < 0)
				goto out;
			if (ret) {
				ret = get_cur_path(sctx, cur->dir,
						   cur->dir_gen, valid_path);
				if (ret < 0)
					goto out;
				ret = send_rmdir(sctx, valid_path);
				if (ret < 0)
					goto out;
				last_dir_ino_rm = cur->dir;
			}
		}
	}

	ret = 0;

out:
	__free_recorded_refs(&check_dirs);
	free_recorded_refs(sctx);
	fs_path_free(valid_path);
	return ret;
}

static int record_ref(struct btrfs_root *root, u64 dir, struct fs_path *name,
		      void *ctx, struct list_head *refs)
{
	int ret = 0;
	struct send_ctx *sctx = ctx;
	struct fs_path *p;
	u64 gen;

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	ret = get_inode_info(root, dir, NULL, &gen, NULL, NULL,
			NULL, NULL);
	if (ret < 0)
		goto out;

	ret = get_cur_path(sctx, dir, gen, p);
	if (ret < 0)
		goto out;
	ret = fs_path_add_path(p, name);
	if (ret < 0)
		goto out;

	ret = __record_ref(refs, dir, gen, p);

out:
	if (ret)
		fs_path_free(p);
	return ret;
}

static int __record_new_ref(int num, u64 dir, int index,
			    struct fs_path *name,
			    void *ctx)
{
	struct send_ctx *sctx = ctx;
	return record_ref(sctx->send_root, dir, name, ctx, &sctx->new_refs);
}


static int __record_deleted_ref(int num, u64 dir, int index,
				struct fs_path *name,
				void *ctx)
{
	struct send_ctx *sctx = ctx;
	return record_ref(sctx->parent_root, dir, name, ctx,
			  &sctx->deleted_refs);
}

static int record_new_ref(struct send_ctx *sctx)
{
	int ret;

	ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
				sctx->cmp_key, 0, __record_new_ref, sctx);
	if (ret < 0)
		goto out;
	ret = 0;

out:
	return ret;
}

static int record_deleted_ref(struct send_ctx *sctx)
{
	int ret;

	ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
				sctx->cmp_key, 0, __record_deleted_ref, sctx);
	if (ret < 0)
		goto out;
	ret = 0;

out:
	return ret;
}

struct find_ref_ctx {
	u64 dir;
	u64 dir_gen;
	struct btrfs_root *root;
	struct fs_path *name;
	int found_idx;
};

static int __find_iref(int num, u64 dir, int index,
		       struct fs_path *name,
		       void *ctx_)
{
	struct find_ref_ctx *ctx = ctx_;
	u64 dir_gen;
	int ret;

	if (dir == ctx->dir && fs_path_len(name) == fs_path_len(ctx->name) &&
	    strncmp(name->start, ctx->name->start, fs_path_len(name)) == 0) {
		/*
		 * To avoid doing extra lookups we'll only do this if everything
		 * else matches.
		 */
		ret = get_inode_info(ctx->root, dir, NULL, &dir_gen, NULL,
				     NULL, NULL, NULL);
		if (ret)
			return ret;
		if (dir_gen != ctx->dir_gen)
			return 0;
		ctx->found_idx = num;
		return 1;
	}
	return 0;
}

static int find_iref(struct btrfs_root *root,
		     struct btrfs_path *path,
		     struct btrfs_key *key,
		     u64 dir, u64 dir_gen, struct fs_path *name)
{
	int ret;
	struct find_ref_ctx ctx;

	ctx.dir = dir;
	ctx.name = name;
	ctx.dir_gen = dir_gen;
	ctx.found_idx = -1;
	ctx.root = root;

	ret = iterate_inode_ref(root, path, key, 0, __find_iref, &ctx);
	if (ret < 0)
		return ret;

	if (ctx.found_idx == -1)
		return -ENOENT;

	return ctx.found_idx;
}

static int __record_changed_new_ref(int num, u64 dir, int index,
				    struct fs_path *name,
				    void *ctx)
{
	u64 dir_gen;
	int ret;
	struct send_ctx *sctx = ctx;

	ret = get_inode_info(sctx->send_root, dir, NULL, &dir_gen, NULL,
			     NULL, NULL, NULL);
	if (ret)
		return ret;

	ret = find_iref(sctx->parent_root, sctx->right_path,
			sctx->cmp_key, dir, dir_gen, name);
	if (ret == -ENOENT)
		ret = __record_new_ref(num, dir, index, name, sctx);
	else if (ret > 0)
		ret = 0;

	return ret;
}

static int __record_changed_deleted_ref(int num, u64 dir, int index,
					struct fs_path *name,
					void *ctx)
{
	u64 dir_gen;
	int ret;
	struct send_ctx *sctx = ctx;

	ret = get_inode_info(sctx->parent_root, dir, NULL, &dir_gen, NULL,
			     NULL, NULL, NULL);
	if (ret)
		return ret;

	ret = find_iref(sctx->send_root, sctx->left_path, sctx->cmp_key,
			dir, dir_gen, name);
	if (ret == -ENOENT)
		ret = __record_deleted_ref(num, dir, index, name, sctx);
	else if (ret > 0)
		ret = 0;

	return ret;
}

static int record_changed_ref(struct send_ctx *sctx)
{
	int ret = 0;

	ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
			sctx->cmp_key, 0, __record_changed_new_ref, sctx);
	if (ret < 0)
		goto out;
	ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
			sctx->cmp_key, 0, __record_changed_deleted_ref, sctx);
	if (ret < 0)
		goto out;
	ret = 0;

out:
	return ret;
}

/*
 * Record and process all refs at once. Needed when an inode changes the
 * generation number, which means that it was deleted and recreated.
 */
static int process_all_refs(struct send_ctx *sctx,
			    enum btrfs_compare_tree_result cmd)
{
	int ret;
	struct btrfs_root *root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	int slot;
	iterate_inode_ref_t cb;
	int pending_move = 0;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	if (cmd == BTRFS_COMPARE_TREE_NEW) {
		root = sctx->send_root;
		cb = __record_new_ref;
	} else if (cmd == BTRFS_COMPARE_TREE_DELETED) {
		root = sctx->parent_root;
		cb = __record_deleted_ref;
	} else {
		btrfs_err(sctx->send_root->fs_info,
				"Wrong command %d in process_all_refs", cmd);
		ret = -EINVAL;
		goto out;
	}

	key.objectid = sctx->cmp_key->objectid;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		eb = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(eb)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			else if (ret > 0)
				break;
			continue;
		}

		btrfs_item_key_to_cpu(eb, &found_key, slot);

		if (found_key.objectid != key.objectid ||
		    (found_key.type != BTRFS_INODE_REF_KEY &&
		     found_key.type != BTRFS_INODE_EXTREF_KEY))
			break;

		ret = iterate_inode_ref(root, path, &found_key, 0, cb, sctx);
		if (ret < 0)
			goto out;

		path->slots[0]++;
	}
	btrfs_release_path(path);

	/*
	 * We don't actually care about pending_move as we are simply
	 * re-creating this inode and will be rename'ing it into place once we
	 * rename the parent directory.
	 */
	ret = process_recorded_refs(sctx, &pending_move);
out:
	btrfs_free_path(path);
	return ret;
}

static int send_set_xattr(struct send_ctx *sctx,
			  struct fs_path *path,
			  const char *name, int name_len,
			  const char *data, int data_len)
{
	int ret = 0;

	ret = begin_cmd(sctx, BTRFS_SEND_C_SET_XATTR);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
	TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);
	TLV_PUT(sctx, BTRFS_SEND_A_XATTR_DATA, data, data_len);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}

static int send_remove_xattr(struct send_ctx *sctx,
			     struct fs_path *path,
			     const char *name, int name_len)
{
	int ret = 0;

	ret = begin_cmd(sctx, BTRFS_SEND_C_REMOVE_XATTR);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
	TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}

static int __process_new_xattr(int num, struct btrfs_key *di_key,
			       const char *name, int name_len,
			       const char *data, int data_len,
			       u8 type, void *ctx)
{
	int ret;
	struct send_ctx *sctx = ctx;
	struct fs_path *p;
	struct posix_acl_xattr_header dummy_acl;

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	/*
	 * This hack is needed because empty acls are stored as zero byte
	 * data in xattrs. Problem with that is, that receiving these zero byte
	 * acls will fail later. To fix this, we send a dummy acl list that
	 * only contains the version number and no entries.
	 */
	if (!strncmp(name, XATTR_NAME_POSIX_ACL_ACCESS, name_len) ||
	    !strncmp(name, XATTR_NAME_POSIX_ACL_DEFAULT, name_len)) {
		if (data_len == 0) {
			dummy_acl.a_version =
					cpu_to_le32(POSIX_ACL_XATTR_VERSION);
			data = (char *)&dummy_acl;
			data_len = sizeof(dummy_acl);
		}
	}

	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
	if (ret < 0)
		goto out;

	ret = send_set_xattr(sctx, p, name, name_len, data, data_len);

out:
	fs_path_free(p);
	return ret;
}

static int __process_deleted_xattr(int num, struct btrfs_key *di_key,
				   const char *name, int name_len,
				   const char *data, int data_len,
				   u8 type, void *ctx)
{
	int ret;
	struct send_ctx *sctx = ctx;
	struct fs_path *p;

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
	if (ret < 0)
		goto out;

	ret = send_remove_xattr(sctx, p, name, name_len);

out:
	fs_path_free(p);
	return ret;
}

static int process_new_xattr(struct send_ctx *sctx)
{
	int ret = 0;

	ret = iterate_dir_item(sctx->send_root, sctx->left_path,
			       __process_new_xattr, sctx);

	return ret;
}

static int process_deleted_xattr(struct send_ctx *sctx)
{
	return iterate_dir_item(sctx->parent_root, sctx->right_path,
				__process_deleted_xattr, sctx);
}

struct find_xattr_ctx {
	const char *name;
	int name_len;
	int found_idx;
	char *found_data;
	int found_data_len;
};

static int __find_xattr(int num, struct btrfs_key *di_key,
			const char *name, int name_len,
			const char *data, int data_len,
			u8 type, void *vctx)
{
	struct find_xattr_ctx *ctx = vctx;

	if (name_len == ctx->name_len &&
	    strncmp(name, ctx->name, name_len) == 0) {
		ctx->found_idx = num;
		ctx->found_data_len = data_len;
		ctx->found_data = kmemdup(data, data_len, GFP_KERNEL);
		if (!ctx->found_data)
			return -ENOMEM;
		return 1;
	}
	return 0;
}

static int find_xattr(struct btrfs_root *root,
		      struct btrfs_path *path,
		      struct btrfs_key *key,
		      const char *name, int name_len,
		      char **data, int *data_len)
{
	int ret;
	struct find_xattr_ctx ctx;

	ctx.name = name;
	ctx.name_len = name_len;
	ctx.found_idx = -1;
	ctx.found_data = NULL;
	ctx.found_data_len = 0;

	ret = iterate_dir_item(root, path, __find_xattr, &ctx);
	if (ret < 0)
		return ret;

	if (ctx.found_idx == -1)
		return -ENOENT;
	if (data) {
		*data = ctx.found_data;
		*data_len = ctx.found_data_len;
	} else {
		kfree(ctx.found_data);
	}
	return ctx.found_idx;
}

static int __process_changed_new_xattr(int num, struct btrfs_key *di_key,
				       const char *name, int name_len,
				       const char *data, int data_len,
				       u8 type, void *ctx)
{
	int ret;
	struct send_ctx *sctx = ctx;
	char *found_data = NULL;
	int found_data_len = 0;

	ret = find_xattr(sctx->parent_root, sctx->right_path,
			 sctx->cmp_key, name, name_len, &found_data,
			 &found_data_len);
	if (ret == -ENOENT) {
		ret = __process_new_xattr(num, di_key, name, name_len, data,
				data_len, type, ctx);
	} else if (ret >= 0) {
		if (data_len != found_data_len ||
		    memcmp(data, found_data, data_len)) {
			ret = __process_new_xattr(num, di_key, name, name_len,
					data, data_len, type, ctx);
		} else {
			ret = 0;
		}
	}

	kfree(found_data);
	return ret;
}

static int __process_changed_deleted_xattr(int num, struct btrfs_key *di_key,
					   const char *name, int name_len,
					   const char *data, int data_len,
					   u8 type, void *ctx)
{
	int ret;
	struct send_ctx *sctx = ctx;

	ret = find_xattr(sctx->send_root, sctx->left_path, sctx->cmp_key,
			 name, name_len, NULL, NULL);
	if (ret == -ENOENT)
		ret = __process_deleted_xattr(num, di_key, name, name_len, data,
				data_len, type, ctx);
	else if (ret >= 0)
		ret = 0;

	return ret;
}

static int process_changed_xattr(struct send_ctx *sctx)
{
	int ret = 0;

	ret = iterate_dir_item(sctx->send_root, sctx->left_path,
			__process_changed_new_xattr, sctx);
	if (ret < 0)
		goto out;
	ret = iterate_dir_item(sctx->parent_root, sctx->right_path,
			__process_changed_deleted_xattr, sctx);

out:
	return ret;
}

static int process_all_new_xattrs(struct send_ctx *sctx)
{
	int ret;
	struct btrfs_root *root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	int slot;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	root = sctx->send_root;

	key.objectid = sctx->cmp_key->objectid;
	key.type = BTRFS_XATTR_ITEM_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		eb = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(eb)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0) {
				goto out;
			} else if (ret > 0) {
				ret = 0;
				break;
			}
			continue;
		}

		btrfs_item_key_to_cpu(eb, &found_key, slot);
		if (found_key.objectid != key.objectid ||
		    found_key.type != key.type) {
			ret = 0;
			goto out;
		}

		ret = iterate_dir_item(root, path, __process_new_xattr, sctx);
		if (ret < 0)
			goto out;

		path->slots[0]++;
	}

out:
	btrfs_free_path(path);
	return ret;
}

static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len)
{
	struct btrfs_root *root = sctx->send_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct inode *inode;
	struct page *page;
	char *addr;
	struct btrfs_key key;
	pgoff_t index = offset >> PAGE_SHIFT;
	pgoff_t last_index;
	unsigned pg_offset = offset & ~PAGE_MASK;
	ssize_t ret = 0;

	key.objectid = sctx->cur_ino;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	inode = btrfs_iget(fs_info->sb, &key, root, NULL);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	if (offset + len > i_size_read(inode)) {
		if (offset > i_size_read(inode))
			len = 0;
		else
			len = i_size_read(inode) - offset;
	}
	if (len == 0)
		goto out;

	last_index = (offset + len - 1) >> PAGE_SHIFT;

	/* initial readahead */
	memset(&sctx->ra, 0, sizeof(struct file_ra_state));
	file_ra_state_init(&sctx->ra, inode->i_mapping);

	while (index <= last_index) {
		unsigned cur_len = min_t(unsigned, len,
					 PAGE_SIZE - pg_offset);

		page = find_lock_page(inode->i_mapping, index);
		if (!page) {
			page_cache_sync_readahead(inode->i_mapping, &sctx->ra,
				NULL, index, last_index + 1 - index);

			page = find_or_create_page(inode->i_mapping, index,
					GFP_KERNEL);
			if (!page) {
				ret = -ENOMEM;
				break;
			}
		}

		if (PageReadahead(page)) {
			page_cache_async_readahead(inode->i_mapping, &sctx->ra,
				NULL, page, index, last_index + 1 - index);
		}

		if (!PageUptodate(page)) {
			btrfs_readpage(NULL, page);
			lock_page(page);
			if (!PageUptodate(page)) {
				unlock_page(page);
				put_page(page);
				ret = -EIO;
				break;
			}
		}

		addr = kmap(page);
		memcpy(sctx->read_buf + ret, addr + pg_offset, cur_len);
		kunmap(page);
		unlock_page(page);
		put_page(page);
		index++;
		pg_offset = 0;
		len -= cur_len;
		ret += cur_len;
	}
out:
	iput(inode);
	return ret;
}

/*
 * Read some bytes from the current inode/file and send a write command to
 * user space.
 */
static int send_write(struct send_ctx *sctx, u64 offset, u32 len)
{
	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
	int ret = 0;
	struct fs_path *p;
	ssize_t num_read = 0;

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	btrfs_debug(fs_info, "send_write offset=%llu, len=%d", offset, len);

	num_read = fill_read_buf(sctx, offset, len);
	if (num_read <= 0) {
		if (num_read < 0)
			ret = num_read;
		goto out;
	}

	ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
	if (ret < 0)
		goto out;

	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
	TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, num_read);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	fs_path_free(p);
	if (ret < 0)
		return ret;
	return num_read;
}

/*
 * Send a clone command to user space.
 */
static int send_clone(struct send_ctx *sctx,
		      u64 offset, u32 len,
		      struct clone_root *clone_root)
{
	int ret = 0;
	struct fs_path *p;
	u64 gen;

	btrfs_debug(sctx->send_root->fs_info,
		    "send_clone offset=%llu, len=%d, clone_root=%llu, clone_inode=%llu, clone_offset=%llu",
		    offset, len, clone_root->root->objectid, clone_root->ino,
		    clone_root->offset);

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	ret = begin_cmd(sctx, BTRFS_SEND_C_CLONE);
	if (ret < 0)
		goto out;

	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
	if (ret < 0)
		goto out;

	TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_LEN, len);
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);

	if (clone_root->root == sctx->send_root) {
		ret = get_inode_info(sctx->send_root, clone_root->ino, NULL,
				&gen, NULL, NULL, NULL, NULL);
		if (ret < 0)
			goto out;
		ret = get_cur_path(sctx, clone_root->ino, gen, p);
	} else {
		ret = get_inode_path(clone_root->root, clone_root->ino, p);
	}
	if (ret < 0)
		goto out;

	/*
	 * If the parent we're using has a received_uuid set then use that as
	 * our clone source as that is what we will look for when doing a
	 * receive.
	 *
	 * This covers the case that we create a snapshot off of a received
	 * subvolume and then use that as the parent and try to receive on a
	 * different host.
	 */
	if (!btrfs_is_empty_uuid(clone_root->root->root_item.received_uuid))
		TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
			     clone_root->root->root_item.received_uuid);
	else
		TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
			     clone_root->root->root_item.uuid);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
		    le64_to_cpu(clone_root->root->root_item.ctransid));
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_CLONE_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_OFFSET,
			clone_root->offset);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	fs_path_free(p);
	return ret;
}

/*
 * Send an update extent command to user space.
 */
static int send_update_extent(struct send_ctx *sctx,
			      u64 offset, u32 len)
{
	int ret = 0;
	struct fs_path *p;

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	ret = begin_cmd(sctx, BTRFS_SEND_C_UPDATE_EXTENT);
	if (ret < 0)
		goto out;

	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, len);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	fs_path_free(p);
	return ret;
}

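/*
 * Make a hole in the range [sctx->cur_inode_last_extent, end) visible at the
 * receiver by sending writes of zeroes (or a single update-extent command
 * when the stream carries no file data).
 */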
static int send_hole(struct send_ctx *sctx, u64 end)
{
	struct fs_path *p = NULL;
	u64 offset = sctx->cur_inode_last_extent;
	u64 len;
	int ret = 0;

	/*
	 * A hole that starts at EOF or beyond it. Since we do not yet support
	 * fallocate (for extent preallocation and hole punching), sending a
	 * write of zeroes starting at EOF or beyond would later require issuing
	 * a truncate operation which would undo the write and achieve nothing.
	 */
	if (offset >= sctx->cur_inode_size)
		return 0;

	if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA)
		return send_update_extent(sctx, offset, end - offset);

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;
	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
	if (ret < 0)
		goto tlv_put_failure;
	memset(sctx->read_buf, 0, BTRFS_SEND_READ_SIZE);
	while (offset < end) {
		len = min_t(u64, end - offset, BTRFS_SEND_READ_SIZE);

		ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
		if (ret < 0)
			break;
		TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
		TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
		TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, len);
		ret = send_cmd(sctx);
		if (ret < 0)
			break;
		offset += len;
	}
	sctx->cur_inode_next_write_offset = offset;
tlv_put_failure:
	fs_path_free(p);
	return ret;
}

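/*
 * Send the data of the range [offset, offset + len) as a sequence of write
 * commands, each carrying at most BTRFS_SEND_READ_SIZE bytes.
 */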
static int send_extent_data(struct send_ctx *sctx,
			    const u64 offset,
			    const u64 len)
{
	u64 sent = 0;

	if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA)
		return send_update_extent(sctx, offset, len);

	while (sent < len) {
		u64 size = len - sent;
		int ret;

		if (size > BTRFS_SEND_READ_SIZE)
			size = BTRFS_SEND_READ_SIZE;
		ret = send_write(sctx, offset + sent, size);
		if (ret < 0)
			return ret;
		if (!ret)
			break;
		sent += ret;
	}
	return 0;
}

static int clone_range(struct send_ctx *sctx,
		       struct clone_root *clone_root,
		       const u64 disk_byte,
		       u64 data_offset,
		       u64 offset,
		       u64 len)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	/*
	 * Prevent cloning from a zero offset with a length matching the sector
	 * size because in some scenarios this will make the receiver fail.
	 *
	 * For example, if in the source filesystem the extent at offset 0
	 * has a length of sectorsize and it was written using direct IO, then
	 * it can never be an inline extent (even if compression is enabled).
	 * Then this extent can be cloned in the original filesystem to a non
	 * zero file offset, but it may not be possible to clone in the
	 * destination filesystem because it can be inlined due to compression
	 * on the destination filesystem (as the receiver's write operations are
	 * always done using buffered IO). The same happens when the original
	 * filesystem does not have compression enabled but the destination
	 * filesystem does.
	 */
	if (clone_root->offset == 0 &&
	    len == sctx->send_root->fs_info->sectorsize)
		return send_extent_data(sctx, offset, len);

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	/*
	 * We can't send a clone operation for the entire range if we find
	 * extent items in the respective range in the source file that
	 * refer to different extents or if we find holes.
	 * So check for that and do a mix of clone and regular write/copy
	 * operations if needed.
	 *
	 * Example:
	 *
	 * mkfs.btrfs -f /dev/sda
	 * mount /dev/sda /mnt
	 * xfs_io -f -c "pwrite -S 0xaa 0K 100K" /mnt/foo
	 * cp --reflink=always /mnt/foo /mnt/bar
	 * xfs_io -c "pwrite -S 0xbb 50K 50K" /mnt/foo
	 * btrfs subvolume snapshot -r /mnt /mnt/snap
	 *
	 * If when we send the snapshot and we are processing file bar (which
	 * has a higher inode number than foo) we blindly send a clone operation
	 * for the [0, 100K[ range from foo to bar, the receiver ends up getting
	 * a file bar that matches the content of file foo - iow, doesn't match
	 * the content from bar in the original filesystem.
	 */
	key.objectid = clone_root->ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = clone_root->offset;
	ret = btrfs_search_slot(NULL, clone_root->root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0 && path->slots[0] > 0) {
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1);
		if (key.objectid == clone_root->ino &&
		    key.type == BTRFS_EXTENT_DATA_KEY)
			path->slots[0]--;
	}

	while (true) {
		struct extent_buffer *leaf = path->nodes[0];
		int slot = path->slots[0];
		struct btrfs_file_extent_item *ei;
		u8 type;
		u64 ext_len;
		u64 clone_len;

		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(clone_root->root, path);
			if (ret < 0)
				goto out;
			else if (ret > 0)
				break;
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, slot);

		/*
		 * We might have an implicit trailing hole (NO_HOLES feature
		 * enabled). We deal with it after leaving this loop.
		 */
		if (key.objectid != clone_root->ino ||
		    key.type != BTRFS_EXTENT_DATA_KEY)
			break;

		ei = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
		type = btrfs_file_extent_type(leaf, ei);
		if (type == BTRFS_FILE_EXTENT_INLINE) {
			ext_len = btrfs_file_extent_ram_bytes(leaf, ei);
			ext_len = PAGE_ALIGN(ext_len);
		} else {
			ext_len = btrfs_file_extent_num_bytes(leaf, ei);
		}

		if (key.offset + ext_len <= clone_root->offset)
			goto next;

		if (key.offset > clone_root->offset) {
			/* Implicit hole, NO_HOLES feature enabled. */
			u64 hole_len = key.offset - clone_root->offset;

			if (hole_len > len)
				hole_len = len;
			ret = send_extent_data(sctx, offset, hole_len);
			if (ret < 0)
				goto out;

			len -= hole_len;
			if (!len)
				break;
			offset += hole_len;
			clone_root->offset += hole_len;
			data_offset += hole_len;
		}

		if (key.offset >= clone_root->offset + len)
			break;

		clone_len = min_t(u64, ext_len, len);

		if (btrfs_file_extent_disk_bytenr(leaf, ei) == disk_byte &&
		    btrfs_file_extent_offset(leaf, ei) == data_offset)
			ret = send_clone(sctx, offset, clone_len, clone_root);
		else
			ret = send_extent_data(sctx, offset, clone_len);

		if (ret < 0)
			goto out;

		len -= clone_len;
		if (len == 0)
			break;
		offset += clone_len;
		clone_root->offset += clone_len;
		data_offset += clone_len;
next:
		path->slots[0]++;
	}

	if (len > 0)
		ret = send_extent_data(sctx, offset, len);
	else
		ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

static int send_write_or_clone(struct send_ctx *sctx,
			       struct btrfs_path *path,
			       struct btrfs_key *key,
			       struct clone_root *clone_root)
{
	int ret = 0;
	struct btrfs_file_extent_item *ei;
	u64 offset = key->offset;
	u64 len;
	u8 type;
	u64 bs = sctx->send_root->fs_info->sb->s_blocksize;

	ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
			struct btrfs_file_extent_item);
	type = btrfs_file_extent_type(path->nodes[0], ei);
	if (type == BTRFS_FILE_EXTENT_INLINE) {
		len = btrfs_file_extent_ram_bytes(path->nodes[0], ei);
		/*
		 * it is possible the inline item won't cover the whole page,
		 * but there may be items after this page. Make
		 * sure to send the whole thing
		 */
		len = PAGE_ALIGN(len);
	} else {
		len = btrfs_file_extent_num_bytes(path->nodes[0], ei);
	}

	if (offset >= sctx->cur_inode_size) {
		ret = 0;
		goto out;
	}
	if (offset + len > sctx->cur_inode_size)
		len = sctx->cur_inode_size - offset;
	if (len == 0) {
		ret = 0;
		goto out;
	}

	if (clone_root && IS_ALIGNED(offset + len, bs)) {
		u64 disk_byte;
		u64 data_offset;

		disk_byte = btrfs_file_extent_disk_bytenr(path->nodes[0], ei);
		data_offset = btrfs_file_extent_offset(path->nodes[0], ei);
		ret = clone_range(sctx, clone_root, disk_byte, data_offset,
				  offset, len);
	} else {
		ret = send_extent_data(sctx, offset, len);
	}
	sctx->cur_inode_next_write_offset = offset + len;
out:
	return ret;
}

static int is_extent_unchanged(struct send_ctx *sctx,
			       struct btrfs_path *left_path,
			       struct btrfs_key *ekey)
{
	int ret = 0;
	struct btrfs_key key;
	struct btrfs_path *path = NULL;
	struct extent_buffer *eb;
	int slot;
	struct btrfs_key found_key;
	struct btrfs_file_extent_item *ei;
	u64 left_disknr;
	u64 right_disknr;
	u64 left_offset;
	u64 right_offset;
	u64 left_offset_fixed;
	u64 left_len;
	u64 right_len;
	u64 left_gen;
	u64 right_gen;
	u8 left_type;
	u8 right_type;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	eb = left_path->nodes[0];
	slot = left_path->slots[0];
	ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
	left_type = btrfs_file_extent_type(eb, ei);

	if (left_type != BTRFS_FILE_EXTENT_REG) {
		ret = 0;
		goto out;
	}
	left_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
	left_len = btrfs_file_extent_num_bytes(eb, ei);
	left_offset = btrfs_file_extent_offset(eb, ei);
	left_gen = btrfs_file_extent_generation(eb, ei);

	/*
	 * Following comments will refer to these graphics. L is the left
	 * extents which we are checking at the moment. 1-8 are the right
	 * extents that we iterate.
	 *
	 *       |-----L-----|
	 * |-1-|-2a-|-3-|-4-|-5-|-6-|
	 *
	 *       |-----L-----|
	 * |--1--|-2b-|...(same as above)
	 *
	 * Alternative situation. Happens on files where extents got split.
	 *       |-----L-----|
	 * |-----------7-----------|-6-|
	 *
	 * Alternative situation. Happens on files which got larger.
	 *       |-----L-----|
	 * |-8-|
	 * Nothing follows after 8.
	 */

	key.objectid = ekey->objectid;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = ekey->offset;
	ret = btrfs_search_slot_for_read(sctx->parent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret) {
		ret = 0;
		goto out;
	}

	/*
	 * Handle special case where the right side has no extents at all.
	 */
	eb = path->nodes[0];
	slot = path->slots[0];
	btrfs_item_key_to_cpu(eb, &found_key, slot);
	if (found_key.objectid != key.objectid ||
	    found_key.type != key.type) {
		/* If we're a hole then just pretend nothing changed */
		ret = (left_disknr) ? 0 : 1;
		goto out;
	}

	/*
	 * We're now on 2a, 2b or 7.
	 */
	key = found_key;
	while (key.offset < ekey->offset + left_len) {
		ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		right_type = btrfs_file_extent_type(eb, ei);
		if (right_type != BTRFS_FILE_EXTENT_REG &&
		    right_type != BTRFS_FILE_EXTENT_INLINE) {
			ret = 0;
			goto out;
		}

		if (right_type == BTRFS_FILE_EXTENT_INLINE) {
			right_len = btrfs_file_extent_ram_bytes(eb, ei);
			right_len = PAGE_ALIGN(right_len);
		} else {
			right_len = btrfs_file_extent_num_bytes(eb, ei);
		}

		/*
		 * Are we at extent 8? If yes, we know the extent is changed.
		 * This may only happen on the first iteration.
		 */
		if (found_key.offset + right_len <= ekey->offset) {
			/* If we're a hole just pretend nothing changed */
			ret = (left_disknr) ? 0 : 1;
			goto out;
		}

		/*
		 * We just wanted to see if when we have an inline extent, what
		 * follows it is a regular extent (wanted to check the above
		 * condition for inline extents too). This should normally not
		 * happen but it's possible for example when we have an inline
		 * compressed extent representing data with a size matching
		 * the page size (currently the same as sector size).
		 */
		if (right_type == BTRFS_FILE_EXTENT_INLINE) {
			ret = 0;
			goto out;
		}

		right_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
		right_offset = btrfs_file_extent_offset(eb, ei);
		right_gen = btrfs_file_extent_generation(eb, ei);

		left_offset_fixed = left_offset;
		if (key.offset < ekey->offset) {
			/* Fix the right offset for 2a and 7. */
			right_offset += ekey->offset - key.offset;
		} else {
			/* Fix the left offset for all behind 2a and 2b */
			left_offset_fixed += key.offset - ekey->offset;
		}

		/*
		 * Check if we have the same extent.
		 */
		if (left_disknr != right_disknr ||
		    left_offset_fixed != right_offset ||
		    left_gen != right_gen) {
			ret = 0;
			goto out;
		}

		/*
		 * Go to the next extent.
		 */
		ret = btrfs_next_item(sctx->parent_root, path);
		if (ret < 0)
			goto out;
		if (!ret) {
			eb = path->nodes[0];
			slot = path->slots[0];
			btrfs_item_key_to_cpu(eb, &found_key, slot);
		}
		if (ret || found_key.objectid != key.objectid ||
		    found_key.type != key.type) {
			key.offset += right_len;
			break;
		}
		if (found_key.offset != key.offset + right_len) {
			ret = 0;
			goto out;
		}
		key = found_key;
	}

	/*
	 * We're now behind the left extent (treat as unchanged) or at the end
	 * of the right side (treat as changed).
	 */
	if (key.offset >= ekey->offset + left_len)
		ret = 1;
	else
		ret = 0;

out:
	btrfs_free_path(path);
	return ret;
}

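/*
 * Find the file extent item covering or preceding the given offset and store
 * its end offset (aligned up to the sector size for inline extents) in
 * sctx->cur_inode_last_extent.
 */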
static int get_last_extent(struct send_ctx *sctx, u64 offset)
{
	struct btrfs_path *path;
	struct btrfs_root *root = sctx->send_root;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 extent_end;
	u8 type;
	int ret;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	sctx->cur_inode_last_extent = 0;

	key.objectid = sctx->cur_ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = offset;
	ret = btrfs_search_slot_for_read(root, &key, path, 0, 1);
	if (ret < 0)
		goto out;
	ret = 0;
	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
	if (key.objectid != sctx->cur_ino || key.type != BTRFS_EXTENT_DATA_KEY)
		goto out;

	fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
			    struct btrfs_file_extent_item);
	type = btrfs_file_extent_type(path->nodes[0], fi);
	if (type == BTRFS_FILE_EXTENT_INLINE) {
		u64 size = btrfs_file_extent_ram_bytes(path->nodes[0], fi);
		extent_end = ALIGN(key.offset + size,
				   sctx->send_root->fs_info->sectorsize);
	} else {
		extent_end = key.offset +
			btrfs_file_extent_num_bytes(path->nodes[0], fi);
	}
	sctx->cur_inode_last_extent = extent_end;
out:
	btrfs_free_path(path);
	return ret;
}

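/*
 * Returns 1 if the range [start, end) is a hole in the parent snapshot (no
 * extent with a real disk backing intersects it), 0 if not, and < 0 on error.
 */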
static int range_is_hole_in_parent(struct send_ctx *sctx,
				   const u64 start,
				   const u64 end)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_root *root = sctx->parent_root;
	u64 search_start = start;
	int ret;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = sctx->cur_ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = search_start;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0 && path->slots[0] > 0)
		path->slots[0]--;

	while (search_start < end) {
		struct extent_buffer *leaf = path->nodes[0];
		int slot = path->slots[0];
		struct btrfs_file_extent_item *fi;
		u64 extent_end;

		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			else if (ret > 0)
				break;
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid < sctx->cur_ino ||
		    key.type < BTRFS_EXTENT_DATA_KEY)
			goto next;
		if (key.objectid > sctx->cur_ino ||
		    key.type > BTRFS_EXTENT_DATA_KEY ||
		    key.offset >= end)
			break;

		fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
		if (btrfs_file_extent_type(leaf, fi) ==
		    BTRFS_FILE_EXTENT_INLINE) {
			u64 size = btrfs_file_extent_ram_bytes(leaf, fi);

			extent_end = ALIGN(key.offset + size,
					   root->fs_info->sectorsize);
		} else {
			extent_end = key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
		}
		if (extent_end <= start)
			goto next;
		if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0) {
			search_start = extent_end;
			goto next;
		}
		ret = 0;
		goto out;
next:
		path->slots[0]++;
	}
	ret = 1;
out:
	btrfs_free_path(path);
	return ret;
}

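/*
 * If there is a gap between the end of the last processed extent and the
 * start of the current one, send zeroes for it, unless the same range was
 * already a hole in the parent snapshot and therefore needs no data.
 */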

/*
 * If there is a gap between the end of the last processed extent and the
 * extent item at @key, send zero writes for the part of the gap that is not
 * already a hole in the parent snapshot.
 */
static int maybe_send_hole(struct send_ctx *sctx, struct btrfs_path *path,
			   struct btrfs_key *key)
{
	struct btrfs_file_extent_item *fi;
	u64 extent_end;
	u8 type;
	int ret = 0;

	if (sctx->cur_ino != key->objectid || !need_send_hole(sctx))
		return 0;

	if (sctx->cur_inode_last_extent == (u64)-1) {
		ret = get_last_extent(sctx, key->offset - 1);
		if (ret)
			return ret;
	}

	fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
			    struct btrfs_file_extent_item);
	type = btrfs_file_extent_type(path->nodes[0], fi);
	if (type == BTRFS_FILE_EXTENT_INLINE) {
		u64 size = btrfs_file_extent_ram_bytes(path->nodes[0], fi);

		extent_end = ALIGN(key->offset + size,
				   sctx->send_root->fs_info->sectorsize);
	} else {
		extent_end = key->offset +
			btrfs_file_extent_num_bytes(path->nodes[0], fi);
	}

	if (path->slots[0] == 0 &&
	    sctx->cur_inode_last_extent < key->offset) {
		/*
		 * We might have skipped entire leafs that contained only
		 * file extent items for our current inode. These leafs have
		 * a generation number smaller (older) than the one in the
		 * current leaf and the leaf our last extent came from, and
		 * are located between these 2 leafs.
		 */
		ret = get_last_extent(sctx, key->offset - 1);
		if (ret)
			return ret;
	}

	if (sctx->cur_inode_last_extent < key->offset) {
		ret = range_is_hole_in_parent(sctx,
					      sctx->cur_inode_last_extent,
					      key->offset);
		if (ret < 0)
			return ret;
		else if (ret == 0)
			ret = send_hole(sctx, key->offset);
		else
			ret = 0;
	}
	sctx->cur_inode_last_extent = extent_end;
	return ret;
}
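
/*
 * Illustrative sketch (not part of the original file) of the decision
 * maybe_send_hole() makes; the helper is made up. A gap between the end of
 * the last extent and the offset of the next extent item only needs a hole
 * to be sent (as zero-filled writes) when the parent snapshot does not
 * already have a hole covering the same range.
 */
static inline bool sketch_must_send_hole(u64 last_extent_end, u64 next_offset,
					 bool hole_in_parent)
{
	return last_extent_end < next_offset && !hole_in_parent;
}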

static int process_extent(struct send_ctx *sctx,
			  struct btrfs_path *path,
			  struct btrfs_key *key)
{
	struct clone_root *found_clone = NULL;
	int ret = 0;

	if (S_ISLNK(sctx->cur_inode_mode))
		return 0;

	if (sctx->parent_root && !sctx->cur_inode_new) {
		ret = is_extent_unchanged(sctx, path, key);
		if (ret < 0)
			goto out;
		if (ret) {
			ret = 0;
			goto out_hole;
		}
	} else {
		struct btrfs_file_extent_item *ei;
		u8 type;

		ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
				    struct btrfs_file_extent_item);
		type = btrfs_file_extent_type(path->nodes[0], ei);
		if (type == BTRFS_FILE_EXTENT_PREALLOC ||
		    type == BTRFS_FILE_EXTENT_REG) {
			/*
			 * The send spec does not have a prealloc command yet,
			 * so just leave a hole for prealloc'ed extents until
			 * we have enough commands queued up to justify rev'ing
			 * the send spec.
			 */
			if (type == BTRFS_FILE_EXTENT_PREALLOC) {
				ret = 0;
				goto out;
			}

			/* Have a hole, just skip it. */
			if (btrfs_file_extent_disk_bytenr(path->nodes[0], ei) == 0) {
				ret = 0;
				goto out;
			}
		}
	}

	ret = find_extent_clone(sctx, path, key->objectid, key->offset,
			sctx->cur_inode_size, &found_clone);
	if (ret != -ENOENT && ret < 0)
		goto out;

	ret = send_write_or_clone(sctx, path, key, found_clone);
	if (ret)
		goto out;
out_hole:
	ret = maybe_send_hole(sctx, path, key);
out:
	return ret;
}

static int process_all_extents(struct send_ctx *sctx)
{
	int ret;
	struct btrfs_root *root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	int slot;

	root = sctx->send_root;
	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = sctx->cmp_key->objectid;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		eb = path->nodes[0];
		slot = path->slots[0];

		if (slot >= btrfs_header_nritems(eb)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0) {
				goto out;
			} else if (ret > 0) {
				ret = 0;
				break;
			}
			continue;
		}

		btrfs_item_key_to_cpu(eb, &found_key, slot);

		if (found_key.objectid != key.objectid ||
		    found_key.type != key.type) {
			ret = 0;
			goto out;
		}

		ret = process_extent(sctx, path, &found_key);
		if (ret < 0)
			goto out;

		path->slots[0]++;
	}

out:
	btrfs_free_path(path);
	return ret;
}

static int process_recorded_refs_if_needed(struct send_ctx *sctx, int at_end,
					   int *pending_move,
					   int *refs_processed)
{
	int ret = 0;

	if (sctx->cur_ino == 0)
		goto out;
	if (!at_end && sctx->cur_ino == sctx->cmp_key->objectid &&
	    sctx->cmp_key->type <= BTRFS_INODE_EXTREF_KEY)
		goto out;
	if (list_empty(&sctx->new_refs) && list_empty(&sctx->deleted_refs))
		goto out;

	ret = process_recorded_refs(sctx, pending_move);
	if (ret < 0)
		goto out;

	*refs_processed = 1;
out:
	return ret;
}

/*
 * Once all items of the current inode were processed, send the remaining
 * per-inode commands: holes, truncate, chown, chmod, pending directory
 * moves and utimes.
 */
static int finish_inode_if_needed(struct send_ctx *sctx, int at_end)
{
	int ret = 0;
	u64 left_mode;
	u64 left_uid;
	u64 left_gid;
	u64 right_mode;
	u64 right_uid;
	u64 right_gid;
	int need_chmod = 0;
	int need_chown = 0;
	int need_truncate = 1;
	int pending_move = 0;
	int refs_processed = 0;

	if (sctx->ignore_cur_inode)
		return 0;

	ret = process_recorded_refs_if_needed(sctx, at_end, &pending_move,
					      &refs_processed);
	if (ret < 0)
		goto out;

	/*
	 * We have processed the refs and thus need to advance send_progress.
	 * Now, calls to get_cur_xxx will take the updated refs of the current
	 * inode into account.
	 *
	 * On the other hand, if our current inode is a directory and couldn't
	 * be moved/renamed because its parent was renamed/moved too and it has
	 * a higher inode number, we can only move/rename our current inode
	 * after we moved/renamed its parent. Therefore in this case operate on
	 * the old path (pre move/rename) of our current inode, and the
	 * move/rename will be performed later.
	 */
	if (refs_processed && !pending_move)
		sctx->send_progress = sctx->cur_ino + 1;

	if (sctx->cur_ino == 0 || sctx->cur_inode_deleted)
		goto out;
	if (!at_end && sctx->cmp_key->objectid == sctx->cur_ino)
		goto out;

	ret = get_inode_info(sctx->send_root, sctx->cur_ino, NULL, NULL,
			&left_mode, &left_uid, &left_gid, NULL);
	if (ret < 0)
		goto out;

	if (!sctx->parent_root || sctx->cur_inode_new) {
		need_chown = 1;
		if (!S_ISLNK(sctx->cur_inode_mode))
			need_chmod = 1;
		if (sctx->cur_inode_next_write_offset == sctx->cur_inode_size)
			need_truncate = 0;
	} else {
		u64 old_size;

		ret = get_inode_info(sctx->parent_root, sctx->cur_ino,
				&old_size, NULL, &right_mode, &right_uid,
				&right_gid, NULL);
		if (ret < 0)
			goto out;

		if (left_uid != right_uid || left_gid != right_gid)
			need_chown = 1;
		if (!S_ISLNK(sctx->cur_inode_mode) && left_mode != right_mode)
			need_chmod = 1;
		if ((old_size == sctx->cur_inode_size) ||
		    (sctx->cur_inode_size > old_size &&
		     sctx->cur_inode_next_write_offset == sctx->cur_inode_size))
			need_truncate = 0;
	}

	if (S_ISREG(sctx->cur_inode_mode)) {
		if (need_send_hole(sctx)) {
			if (sctx->cur_inode_last_extent == (u64)-1 ||
			    sctx->cur_inode_last_extent <
			    sctx->cur_inode_size) {
				ret = get_last_extent(sctx, (u64)-1);
				if (ret)
					goto out;
			}
			if (sctx->cur_inode_last_extent <
			    sctx->cur_inode_size) {
				ret = send_hole(sctx, sctx->cur_inode_size);
				if (ret)
					goto out;
			}
		}
		if (need_truncate) {
			ret = send_truncate(sctx, sctx->cur_ino,
					    sctx->cur_inode_gen,
					    sctx->cur_inode_size);
			if (ret < 0)
				goto out;
		}
	}

	if (need_chown) {
		ret = send_chown(sctx, sctx->cur_ino, sctx->cur_inode_gen,
				left_uid, left_gid);
		if (ret < 0)
			goto out;
	}
	if (need_chmod) {
		ret = send_chmod(sctx, sctx->cur_ino, sctx->cur_inode_gen,
				left_mode & 07777);
		if (ret < 0)
			goto out;
	}

	/*
	 * If other directory inodes depended on our current directory
	 * inode's move/rename, now do their move/rename operations.
	 */
	if (!is_waiting_for_move(sctx, sctx->cur_ino)) {
		ret = apply_children_dir_moves(sctx);
		if (ret)
			goto out;
		/*
		 * Need to send that every time, no matter if it actually
		 * changed between the two trees as we have done changes to
		 * the inode before. If our inode is a directory and it's
		 * waiting to be moved/renamed, we will send its utimes when
		 * it's moved/renamed, therefore we don't need to do it here.
		 */
		sctx->send_progress = sctx->cur_ino + 1;
		ret = send_utimes(sctx, sctx->cur_ino, sctx->cur_inode_gen);
		if (ret < 0)
			goto out;
	}

out:
	return ret;
}

struct parent_paths_ctx {
	struct list_head *refs;
	struct send_ctx *sctx;
};

static int record_parent_ref(int num, u64 dir, int index, struct fs_path *name,
			     void *ctx)
{
	struct parent_paths_ctx *ppctx = ctx;

	return record_ref(ppctx->sctx->parent_root, dir, name, ppctx->sctx,
			  ppctx->refs);
}

/*
 * Issue unlink operations for all paths of the current inode found in the
 * parent snapshot.
 */
static int btrfs_unlink_all_paths(struct send_ctx *sctx)
{
	LIST_HEAD(deleted_refs);
	struct btrfs_path *path;
	struct btrfs_key key;
	struct parent_paths_ctx ctx;
	int ret;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = sctx->cur_ino;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, sctx->parent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	ctx.refs = &deleted_refs;
	ctx.sctx = sctx;

	while (true) {
		struct extent_buffer *eb = path->nodes[0];
		int slot = path->slots[0];

		if (slot >= btrfs_header_nritems(eb)) {
			ret = btrfs_next_leaf(sctx->parent_root, path);
			if (ret < 0)
				goto out;
			else if (ret > 0)
				break;
			continue;
		}

		btrfs_item_key_to_cpu(eb, &key, slot);
		if (key.objectid != sctx->cur_ino)
			break;
		if (key.type != BTRFS_INODE_REF_KEY &&
		    key.type != BTRFS_INODE_EXTREF_KEY)
			break;

		ret = iterate_inode_ref(sctx->parent_root, path, &key, 1,
					record_parent_ref, &ctx);
		if (ret < 0)
			goto out;

		path->slots[0]++;
	}

	while (!list_empty(&deleted_refs)) {
		struct recorded_ref *ref;

		ref = list_first_entry(&deleted_refs, struct recorded_ref, list);
		ret = send_unlink(sctx, ref->full_path);
		if (ret < 0)
			goto out;
		fs_path_free(ref->full_path);
		list_del(&ref->list);
		kfree(ref);
	}
	ret = 0;
out:
	btrfs_free_path(path);
	if (ret)
		__free_recorded_refs(&deleted_refs);
	return ret;
}
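
/*
 * Illustrative sketch (not part of the original file) of the list-draining
 * idiom used above; the struct and helper are made up and stand in for
 * struct recorded_ref. Entries are always taken from the head, and each
 * iteration must unlink and free the entry or the loop never terminates.
 */
struct sketch_ref {
	struct list_head list;
};

static inline void sketch_drain_refs(struct list_head *refs)
{
	while (!list_empty(refs)) {
		struct sketch_ref *ref;

		ref = list_first_entry(refs, struct sketch_ref, list);
		list_del(&ref->list);
		kfree(ref);
	}
}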

/*
 * Handle a changed inode item: set up the per-inode state in sctx and, for
 * new or reused inodes, send the create commands.
 */
static int changed_inode(struct send_ctx *sctx,
			 enum btrfs_compare_tree_result result)
{
	int ret = 0;
	struct btrfs_key *key = sctx->cmp_key;
	struct btrfs_inode_item *left_ii = NULL;
	struct btrfs_inode_item *right_ii = NULL;
	u64 left_gen = 0;
	u64 right_gen = 0;

	sctx->cur_ino = key->objectid;
	sctx->cur_inode_new_gen = 0;
	sctx->cur_inode_last_extent = (u64)-1;
	sctx->cur_inode_next_write_offset = 0;
	sctx->ignore_cur_inode = false;

	/*
	 * Set send_progress to current inode. This will tell all get_cur_xxx
	 * functions that the current inode's refs are not updated yet. Later,
	 * when process_recorded_refs is finished, it is set to cur_ino + 1.
	 */
	sctx->send_progress = sctx->cur_ino;

	if (result == BTRFS_COMPARE_TREE_NEW ||
	    result == BTRFS_COMPARE_TREE_CHANGED) {
		left_ii = btrfs_item_ptr(sctx->left_path->nodes[0],
				sctx->left_path->slots[0],
				struct btrfs_inode_item);
		left_gen = btrfs_inode_generation(sctx->left_path->nodes[0],
				left_ii);
	} else {
		right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
				sctx->right_path->slots[0],
				struct btrfs_inode_item);
		right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
				right_ii);
	}
	if (result == BTRFS_COMPARE_TREE_CHANGED) {
		right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
				sctx->right_path->slots[0],
				struct btrfs_inode_item);

		right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
				right_ii);

		/*
		 * The cur_ino = root dir case is special here. We can't treat
		 * the inode as deleted+reused because it would generate a
		 * stream that tries to delete/mkdir the root dir.
		 */
		if (left_gen != right_gen &&
		    sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
			sctx->cur_inode_new_gen = 1;
	}

	/*
	 * Normally we do not find inodes with a link count of zero (orphans)
	 * because the most common case is to create a snapshot and use it
	 * for a send operation. However other less common use cases involve
	 * using a subvolume and send it after turning it to RO mode just
	 * after deleting all hard links of a file while holding an open
	 * file descriptor against it or turning a RO snapshot into RW mode,
	 * keep an open file descriptor against a file, delete it and then
	 * turn the snapshot back to RO mode before using it for a send
	 * operation. So if we find such cases, ignore the inode and all its
	 * items completely if it's a new inode, or if it's a changed inode
	 * make sure all its previous paths (from the parent snapshot) are all
	 * unlinked and all other the inode items are ignored.
	 */
	if (result == BTRFS_COMPARE_TREE_NEW ||
	    result == BTRFS_COMPARE_TREE_CHANGED) {
		u32 nlinks;

		nlinks = btrfs_inode_nlink(sctx->left_path->nodes[0], left_ii);
		if (nlinks == 0) {
			sctx->ignore_cur_inode = true;
			if (result == BTRFS_COMPARE_TREE_CHANGED)
				ret = btrfs_unlink_all_paths(sctx);
			goto out;
		}
	}

	if (result == BTRFS_COMPARE_TREE_NEW) {
		sctx->cur_inode_gen = left_gen;
		sctx->cur_inode_new = 1;
		sctx->cur_inode_deleted = 0;
		sctx->cur_inode_size = btrfs_inode_size(
				sctx->left_path->nodes[0], left_ii);
		sctx->cur_inode_mode = btrfs_inode_mode(
				sctx->left_path->nodes[0], left_ii);
		sctx->cur_inode_rdev = btrfs_inode_rdev(
				sctx->left_path->nodes[0], left_ii);
		if (sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
			ret = send_create_inode_if_needed(sctx);
	} else if (result == BTRFS_COMPARE_TREE_DELETED) {
		sctx->cur_inode_gen = right_gen;
		sctx->cur_inode_new = 0;
		sctx->cur_inode_deleted = 1;
		sctx->cur_inode_size = btrfs_inode_size(
				sctx->right_path->nodes[0], right_ii);
		sctx->cur_inode_mode = btrfs_inode_mode(
				sctx->right_path->nodes[0], right_ii);
	} else if (result == BTRFS_COMPARE_TREE_CHANGED) {
		/*
		 * We need to do some special handling in case the inode was
		 * reported as changed with a changed generation number. This
		 * means that the original inode was deleted and new inode
		 * reused the same inum. So we have to treat the old inode as
		 * deleted and the new one as new.
		 */
		if (sctx->cur_inode_new_gen) {
			/*
			 * First, process the inode as if it was deleted.
			 */
			sctx->cur_inode_gen = right_gen;
			sctx->cur_inode_new = 0;
			sctx->cur_inode_deleted = 1;
			sctx->cur_inode_size = btrfs_inode_size(
					sctx->right_path->nodes[0], right_ii);
			sctx->cur_inode_mode = btrfs_inode_mode(
					sctx->right_path->nodes[0], right_ii);
			ret = process_all_refs(sctx,
					BTRFS_COMPARE_TREE_DELETED);
			if (ret < 0)
				goto out;

			/*
			 * Now process the inode as if it was new.
			 */
			sctx->cur_inode_gen = left_gen;
			sctx->cur_inode_new = 1;
			sctx->cur_inode_deleted = 0;
			sctx->cur_inode_size = btrfs_inode_size(
					sctx->left_path->nodes[0], left_ii);
			sctx->cur_inode_mode = btrfs_inode_mode(
					sctx->left_path->nodes[0], left_ii);
			sctx->cur_inode_rdev = btrfs_inode_rdev(
					sctx->left_path->nodes[0], left_ii);
			ret = send_create_inode_if_needed(sctx);
			if (ret < 0)
				goto out;

			ret = process_all_refs(sctx, BTRFS_COMPARE_TREE_NEW);
			if (ret < 0)
				goto out;
			/*
			 * Advance send_progress now as we did not get into
			 * process_recorded_refs_if_needed in the new_gen case.
			 */
			sctx->send_progress = sctx->cur_ino + 1;

			/*
			 * Now process all extents and xattrs of the inode as if
			 * they were all new.
			 */
			ret = process_all_extents(sctx);
			if (ret < 0)
				goto out;
			ret = process_all_new_xattrs(sctx);
			if (ret < 0)
				goto out;
		} else {
			sctx->cur_inode_gen = left_gen;
			sctx->cur_inode_new = 0;
			sctx->cur_inode_new_gen = 0;
			sctx->cur_inode_deleted = 0;
			sctx->cur_inode_size = btrfs_inode_size(
					sctx->left_path->nodes[0], left_ii);
			sctx->cur_inode_mode = btrfs_inode_mode(
					sctx->left_path->nodes[0], left_ii);
		}
	}

out:
	return ret;
}
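
/*
 * Illustrative sketch (not part of the original file) of the generation test
 * changed_inode() uses to detect a deleted + recreated inode; the helper is
 * made up. A changed inode whose generation differs between the parent and
 * send snapshots must have been deleted and had its inode number reused,
 * except for the root directory, which is never treated that way.
 */
static inline bool sketch_inode_was_reused(u64 left_gen, u64 right_gen,
					   u64 ino)
{
	return left_gen != right_gen && ino != BTRFS_FIRST_FREE_OBJECTID;
}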

/*
 * We have to process new refs before deleted refs, but compare_trees gives us
 * the new and deleted refs mixed. To fix this, we record the new/deleted refs
 * first and later process them in process_recorded_refs.
 * For the cur_inode_new_gen case, we skip recording completely because
 * changed_inode did already initiate processing of refs. The reason for this is
 * that in this case, compare_tree actually compares the refs of 2 different
 * inodes. To fix this, process_all_refs is used in changed_inode to handle all
 * refs of the right tree as deleted and all refs of the left tree as new.
 */
static int changed_ref(struct send_ctx *sctx,
		       enum btrfs_compare_tree_result result)
{
	int ret = 0;

	if (sctx->cur_ino != sctx->cmp_key->objectid) {
		inconsistent_snapshot_error(sctx, result, "reference");
		return -EIO;
	}

	if (!sctx->cur_inode_new_gen &&
	    sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) {
		if (result == BTRFS_COMPARE_TREE_NEW)
			ret = record_new_ref(sctx);
		else if (result == BTRFS_COMPARE_TREE_DELETED)
			ret = record_deleted_ref(sctx);
		else if (result == BTRFS_COMPARE_TREE_CHANGED)
			ret = record_changed_ref(sctx);
	}

	return ret;
}

/*
 * Process new/deleted/changed xattrs. We skip processing in the
 * cur_inode_new_gen case because changed_inode did already initiate processing
 * of xattrs. The reason is the same as in changed_ref.
 */
static int changed_xattr(struct send_ctx *sctx,
			 enum btrfs_compare_tree_result result)
{
	int ret = 0;

	if (sctx->cur_ino != sctx->cmp_key->objectid) {
		inconsistent_snapshot_error(sctx, result, "xattr");
		return -EIO;
	}

	if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
		if (result == BTRFS_COMPARE_TREE_NEW)
			ret = process_new_xattr(sctx);
		else if (result == BTRFS_COMPARE_TREE_DELETED)
			ret = process_deleted_xattr(sctx);
		else if (result == BTRFS_COMPARE_TREE_CHANGED)
			ret = process_changed_xattr(sctx);
	}

	return ret;
}

/*
 * Process new/deleted/changed extents. We skip processing in the
 * cur_inode_new_gen case because changed_inode did already initiate processing
 * of extents. The reason is the same as in changed_ref.
 */
static int changed_extent(struct send_ctx *sctx,
			  enum btrfs_compare_tree_result result)
{
	int ret = 0;

	if (sctx->cur_ino != sctx->cmp_key->objectid) {

		if (result == BTRFS_COMPARE_TREE_CHANGED) {
			struct extent_buffer *leaf_l;
			struct extent_buffer *leaf_r;
			struct btrfs_file_extent_item *ei_l;
			struct btrfs_file_extent_item *ei_r;

			leaf_l = sctx->left_path->nodes[0];
			leaf_r = sctx->right_path->nodes[0];
			ei_l = btrfs_item_ptr(leaf_l,
					      sctx->left_path->slots[0],
					      struct btrfs_file_extent_item);
			ei_r = btrfs_item_ptr(leaf_r,
					      sctx->right_path->slots[0],
					      struct btrfs_file_extent_item);

			/*
			 * We may have found an extent item that has changed
			 * only its disk_bytenr field and the corresponding
			 * inode item was not updated. This case happens due to
			 * very specific timings during relocation when a leaf
			 * that contains file extent items is COWed while
			 * relocation is ongoing and its in the stage where it
			 * updates data pointers. So when this happens we can
			 * safely ignore it since we know it's the same extent,
			 * but just at different logical and physical locations
			 * (when an extent is fully replaced with a new one, we
			 * know the generation number must have changed too,
			 * since snapshot creation implies committing the current
			 * transaction, and the inode item must have been updated
			 * as well).
			 * This replacement of the disk_bytenr happens at
			 * relocation.c:replace_file_extents() through
			 * relocation.c:btrfs_reloc_cow_block().
			 */
			if (btrfs_file_extent_generation(leaf_l, ei_l) ==
			    btrfs_file_extent_generation(leaf_r, ei_r) &&
			    btrfs_file_extent_ram_bytes(leaf_l, ei_l) ==
			    btrfs_file_extent_ram_bytes(leaf_r, ei_r) &&
			    btrfs_file_extent_compression(leaf_l, ei_l) ==
			    btrfs_file_extent_compression(leaf_r, ei_r) &&
			    btrfs_file_extent_encryption(leaf_l, ei_l) ==
			    btrfs_file_extent_encryption(leaf_r, ei_r) &&
			    btrfs_file_extent_other_encoding(leaf_l, ei_l) ==
			    btrfs_file_extent_other_encoding(leaf_r, ei_r) &&
			    btrfs_file_extent_type(leaf_l, ei_l) ==
			    btrfs_file_extent_type(leaf_r, ei_r) &&
			    btrfs_file_extent_disk_bytenr(leaf_l, ei_l) !=
			    btrfs_file_extent_disk_bytenr(leaf_r, ei_r) &&
			    btrfs_file_extent_disk_num_bytes(leaf_l, ei_l) ==
			    btrfs_file_extent_disk_num_bytes(leaf_r, ei_r) &&
			    btrfs_file_extent_offset(leaf_l, ei_l) ==
			    btrfs_file_extent_offset(leaf_r, ei_r) &&
			    btrfs_file_extent_num_bytes(leaf_l, ei_l) ==
			    btrfs_file_extent_num_bytes(leaf_r, ei_r))
				return 0;
		}

		inconsistent_snapshot_error(sctx, result, "extent");
		return -EIO;
	}

	if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
		if (result != BTRFS_COMPARE_TREE_DELETED)
			ret = process_extent(sctx, sctx->left_path,
					sctx->cmp_key);
	}

	return ret;
}
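
/*
 * Illustrative sketch (not part of the original file) of the relocation
 * check in changed_extent(); the struct and helper are made up and stand in
 * for the btrfs_file_extent_*() accessor calls above. Two file extent items
 * describe the same data moved by relocation if and only if every field
 * matches except disk_bytenr.
 */
struct sketch_extent_fields {
	u64 generation;
	u64 ram_bytes;
	u64 disk_bytenr;
	u64 disk_num_bytes;
	u64 offset;
	u64 num_bytes;
	u16 other_encoding;
	u8 compression;
	u8 encryption;
	u8 type;
};

static inline bool sketch_same_extent_relocated(
				const struct sketch_extent_fields *l,
				const struct sketch_extent_fields *r)
{
	return l->generation == r->generation &&
	       l->ram_bytes == r->ram_bytes &&
	       l->compression == r->compression &&
	       l->encryption == r->encryption &&
	       l->other_encoding == r->other_encoding &&
	       l->type == r->type &&
	       l->disk_bytenr != r->disk_bytenr && /* only the location moved */
	       l->disk_num_bytes == r->disk_num_bytes &&
	       l->offset == r->offset &&
	       l->num_bytes == r->num_bytes;
}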

/* Returns 1 if the generation of @dir differs between the two snapshots. */
static int dir_changed(struct send_ctx *sctx, u64 dir)
{
	u64 orig_gen, new_gen;
	int ret;

	ret = get_inode_info(sctx->send_root, dir, NULL, &new_gen, NULL, NULL,
			     NULL, NULL);
	if (ret)
		return ret;

	ret = get_inode_info(sctx->parent_root, dir, NULL, &orig_gen, NULL,
			     NULL, NULL, NULL);
	if (ret)
		return ret;

	return (orig_gen != new_gen) ? 1 : 0;
}

static int compare_refs(struct send_ctx *sctx, struct btrfs_path *path,
			struct btrfs_key *key)
{
	struct btrfs_inode_extref *extref;
	struct extent_buffer *leaf;
	u64 dirid = 0, last_dirid = 0;
	unsigned long ptr;
	u32 item_size;
	u32 cur_offset = 0;
	int ref_name_len;
	int ret = 0;

	/* Easy case, just check this one dirid */
	if (key->type == BTRFS_INODE_REF_KEY) {
		dirid = key->offset;

		ret = dir_changed(sctx, dirid);
		goto out;
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
	while (cur_offset < item_size) {
		extref = (struct btrfs_inode_extref *)(ptr +
						       cur_offset);
		dirid = btrfs_inode_extref_parent(leaf, extref);
		ref_name_len = btrfs_inode_extref_name_len(leaf, extref);
		cur_offset += ref_name_len + sizeof(*extref);
		if (dirid == last_dirid)
			continue;
		ret = dir_changed(sctx, dirid);
		if (ret)
			break;
		last_dirid = dirid;
	}
out:
	return ret;
}

/*
 * Updates compare related fields in sctx and simply forwards to the actual
 * changed_xxx functions.
 */
static int changed_cb(struct btrfs_path *left_path,
		      struct btrfs_path *right_path,
		      struct btrfs_key *key,
		      enum btrfs_compare_tree_result result,
		      void *ctx)
{
	int ret = 0;
	struct send_ctx *sctx = ctx;

	if (result == BTRFS_COMPARE_TREE_SAME) {
		if (key->type == BTRFS_INODE_REF_KEY ||
		    key->type == BTRFS_INODE_EXTREF_KEY) {
			ret = compare_refs(sctx, left_path, key);
			if (!ret)
				return 0;
			if (ret < 0)
				return ret;
		} else if (key->type == BTRFS_EXTENT_DATA_KEY) {
			return maybe_send_hole(sctx, left_path, key);
		} else {
			return 0;
		}
		result = BTRFS_COMPARE_TREE_CHANGED;
		ret = 0;
	}

	sctx->left_path = left_path;
	sctx->right_path = right_path;
	sctx->cmp_key = key;

	ret = finish_inode_if_needed(sctx, 0);
	if (ret < 0)
		goto out;

	/* Ignore non-FS objects */
	if (key->objectid == BTRFS_FREE_INO_OBJECTID ||
	    key->objectid == BTRFS_FREE_SPACE_OBJECTID)
		goto out;

	if (key->type == BTRFS_INODE_ITEM_KEY) {
		ret = changed_inode(sctx, result);
	} else if (!sctx->ignore_cur_inode) {
		if (key->type == BTRFS_INODE_REF_KEY ||
		    key->type == BTRFS_INODE_EXTREF_KEY)
			ret = changed_ref(sctx, result);
		else if (key->type == BTRFS_XATTR_ITEM_KEY)
			ret = changed_xattr(sctx, result);
		else if (key->type == BTRFS_EXTENT_DATA_KEY)
			ret = changed_extent(sctx, result);
	}

out:
	return ret;
}

/*
 * Walk every item of the send root and report it as new through changed_cb,
 * used when there is no parent snapshot to compare against.
 */
static int full_send_tree(struct send_ctx *sctx)
{
	int ret;
	struct btrfs_root *send_root = sctx->send_root;
	struct btrfs_key key;
	struct btrfs_path *path;
	struct extent_buffer *eb;
	int slot;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_FIRST_FREE_OBJECTID;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot_for_read(send_root, &key, path, 1, 0);
	if (ret < 0)
		goto out;
	if (ret)
		goto out_finish;

	while (1) {
		eb = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(eb, &key, slot);

		ret = changed_cb(path, NULL, &key,
				 BTRFS_COMPARE_TREE_NEW, sctx);
		if (ret < 0)
			goto out;

		ret = btrfs_next_item(send_root, path);
		if (ret < 0)
			goto out;
		if (ret) {
			ret = 0;
			break;
		}
	}

out_finish:
	ret = finish_inode_if_needed(sctx, 1);

out:
	btrfs_free_path(path);
	return ret;
}

static int send_subvol(struct send_ctx *sctx)
{
	int ret;

	if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_STREAM_HEADER)) {
		ret = send_header(sctx);
		if (ret < 0)
			goto out;
	}

	ret = send_subvol_begin(sctx);
	if (ret < 0)
		goto out;

	if (sctx->parent_root) {
		ret = btrfs_compare_trees(sctx->send_root, sctx->parent_root,
				changed_cb, sctx);
		if (ret < 0)
			goto out;
		ret = finish_inode_if_needed(sctx, 1);
		if (ret < 0)
			goto out;
	} else {
		ret = full_send_tree(sctx);
		if (ret < 0)
			goto out;
	}

out:
	free_recorded_refs(sctx);
	return ret;
}

/*
 * If orphan cleanup did remove any orphans from a root, it means the tree
 * was modified and therefore the commit root is not the same as the current
 * root anymore. This is a problem, because send uses the commit root and
 * therefore can see inode items that don't exist in the current root anymore,
 * and for example make calls to btrfs_iget, which will do tree lookups based
 * on the current root and not on the commit root. Those lookups will fail,
 * returning a -ESTALE error, and making send fail with that error. So make
 * sure a send does not see any orphans we have just removed, and that it will
 * see the same inodes regardless of whether a transaction commit happened
 * before it started (meaning that the commit root will be the same as the
 * current root) or not.
 */
static int ensure_commit_roots_uptodate(struct send_ctx *sctx)
{
	int i;
	struct btrfs_trans_handle *trans = NULL;

again:
	if (sctx->parent_root &&
	    sctx->parent_root->node != sctx->parent_root->commit_root)
		goto commit_trans;

	for (i = 0; i < sctx->clone_roots_cnt; i++)
		if (sctx->clone_roots[i].root->node !=
		    sctx->clone_roots[i].root->commit_root)
			goto commit_trans;

	if (trans)
		return btrfs_end_transaction(trans);

	return 0;

commit_trans:
	/* Use any root, all fs roots will get their commit roots updated. */
	if (!trans) {
		trans = btrfs_join_transaction(sctx->send_root);
		if (IS_ERR(trans))
			return PTR_ERR(trans);
		goto again;
	}

	return btrfs_commit_transaction(trans);
}

static void btrfs_root_dec_send_in_progress(struct btrfs_root *root)
{
	spin_lock(&root->root_item_lock);
	root->send_in_progress--;
	/*
	 * Not much left to do, we don't know why it's unbalanced and
	 * can't blindly reset it to 0.
	 */
	if (root->send_in_progress < 0)
		btrfs_err(root->fs_info,
			  "send_in_progress unbalanced %d root %llu",
			  root->send_in_progress, root->root_key.objectid);
	spin_unlock(&root->root_item_lock);
}

long btrfs_ioctl_send(struct file *mnt_file, struct btrfs_ioctl_send_args *arg)
{
	int ret = 0;
	struct btrfs_root *send_root = BTRFS_I(file_inode(mnt_file))->root;
	struct btrfs_fs_info *fs_info = send_root->fs_info;
	struct btrfs_root *clone_root;
	struct btrfs_key key;
	struct send_ctx *sctx = NULL;
	u32 i;
	u64 *clone_sources_tmp = NULL;
	int clone_sources_to_rollback = 0;
	unsigned alloc_size;
	int sort_clone_roots = 0;
	int index;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/*
	 * The subvolume must remain read-only during send, protect against
	 * making it RW. This also protects against deletion.
	 */
	spin_lock(&send_root->root_item_lock);
	send_root->send_in_progress++;
	spin_unlock(&send_root->root_item_lock);

	/*
	 * This is done when we lookup the root, it should already be complete
	 * by the time we get here.
	 */
	WARN_ON(send_root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE);

	/*
	 * Userspace tools do the checks and warn the user if it's
	 * not RO.
	 */
	if (!btrfs_root_readonly(send_root)) {
		ret = -EPERM;
		goto out;
	}

	/*
	 * Check that we don't overflow at later allocations, we request
	 * clone_sources_count + 1 items, and compare to unsigned long inside
	 * access_ok.
	 */
	if (arg->clone_sources_count >
	    ULONG_MAX / sizeof(struct clone_root) - 1) {
		ret = -EINVAL;
		goto out;
	}

	if (!access_ok(VERIFY_READ, arg->clone_sources,
			sizeof(*arg->clone_sources) *
			arg->clone_sources_count)) {
		ret = -EFAULT;
		goto out;
	}

	if (arg->flags & ~BTRFS_SEND_FLAG_MASK) {
		ret = -EINVAL;
		goto out;
	}

	sctx = kzalloc(sizeof(struct send_ctx), GFP_KERNEL);
	if (!sctx) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&sctx->new_refs);
	INIT_LIST_HEAD(&sctx->deleted_refs);
	INIT_RADIX_TREE(&sctx->name_cache, GFP_KERNEL);
	INIT_LIST_HEAD(&sctx->name_cache_list);

	sctx->flags = arg->flags;

	sctx->send_filp = fget(arg->send_fd);
	if (!sctx->send_filp) {
		ret = -EBADF;
		goto out;
	}

	sctx->send_root = send_root;
	/*
	 * Unlikely but possible, if the subvolume is marked for deletion but
	 * is slow to remove the directory entry, send can still be started.
	 */
	if (btrfs_root_dead(sctx->send_root)) {
		ret = -EPERM;
		goto out;
	}

	sctx->clone_roots_cnt = arg->clone_sources_count;

	sctx->send_max_size = BTRFS_SEND_BUF_SIZE;
	sctx->send_buf = kvmalloc(sctx->send_max_size, GFP_KERNEL);
	if (!sctx->send_buf) {
		ret = -ENOMEM;
		goto out;
	}

	sctx->read_buf = kvmalloc(BTRFS_SEND_READ_SIZE, GFP_KERNEL);
	if (!sctx->read_buf) {
		ret = -ENOMEM;
		goto out;
	}

	sctx->pending_dir_moves = RB_ROOT;
	sctx->waiting_dir_moves = RB_ROOT;
	sctx->orphan_dirs = RB_ROOT;

	alloc_size = sizeof(struct clone_root) * (arg->clone_sources_count + 1);

	sctx->clone_roots = kzalloc(alloc_size, GFP_KERNEL);
	if (!sctx->clone_roots) {
		ret = -ENOMEM;
		goto out;
	}

	alloc_size = arg->clone_sources_count * sizeof(*arg->clone_sources);

	if (arg->clone_sources_count) {
		clone_sources_tmp = kvmalloc(alloc_size, GFP_KERNEL);
		if (!clone_sources_tmp) {
			ret = -ENOMEM;
			goto out;
		}

		ret = copy_from_user(clone_sources_tmp, arg->clone_sources,
				alloc_size);
		if (ret) {
			ret = -EFAULT;
			goto out;
		}

		for (i = 0; i < arg->clone_sources_count; i++) {
			key.objectid = clone_sources_tmp[i];
			key.type = BTRFS_ROOT_ITEM_KEY;
			key.offset = (u64)-1;

			index = srcu_read_lock(&fs_info->subvol_srcu);

			clone_root = btrfs_read_fs_root_no_name(fs_info, &key);
			if (IS_ERR(clone_root)) {
				srcu_read_unlock(&fs_info->subvol_srcu, index);
				ret = PTR_ERR(clone_root);
				goto out;
			}
			spin_lock(&clone_root->root_item_lock);
			if (!btrfs_root_readonly(clone_root) ||
			    btrfs_root_dead(clone_root)) {
				spin_unlock(&clone_root->root_item_lock);
				srcu_read_unlock(&fs_info->subvol_srcu, index);
				ret = -EPERM;
				goto out;
			}
			clone_root->send_in_progress++;
			spin_unlock(&clone_root->root_item_lock);
			srcu_read_unlock(&fs_info->subvol_srcu, index);

			sctx->clone_roots[i].root = clone_root;
			clone_sources_to_rollback = i + 1;
		}
		kvfree(clone_sources_tmp);
		clone_sources_tmp = NULL;
	}

	if (arg->parent_root) {
		key.objectid = arg->parent_root;
		key.type = BTRFS_ROOT_ITEM_KEY;
		key.offset = (u64)-1;

		index = srcu_read_lock(&fs_info->subvol_srcu);

		sctx->parent_root = btrfs_read_fs_root_no_name(fs_info, &key);
		if (IS_ERR(sctx->parent_root)) {
			srcu_read_unlock(&fs_info->subvol_srcu, index);
			ret = PTR_ERR(sctx->parent_root);
			goto out;
		}

		spin_lock(&sctx->parent_root->root_item_lock);
		sctx->parent_root->send_in_progress++;
		if (!btrfs_root_readonly(sctx->parent_root) ||
				btrfs_root_dead(sctx->parent_root)) {
			spin_unlock(&sctx->parent_root->root_item_lock);
			srcu_read_unlock(&fs_info->subvol_srcu, index);
			ret = -EPERM;
			goto out;
		}
		spin_unlock(&sctx->parent_root->root_item_lock);

		srcu_read_unlock(&fs_info->subvol_srcu, index);
	}

	/*
	 * Clones from send_root are allowed, but only if the clone source
	 * is behind the current send position. This is checked while searching
	 * for possible clone sources.
	 */
	sctx->clone_roots[sctx->clone_roots_cnt++].root = sctx->send_root;

	/* We do a bsearch later */
	sort(sctx->clone_roots, sctx->clone_roots_cnt,
			sizeof(*sctx->clone_roots), __clone_root_cmp_sort,
			NULL);
	sort_clone_roots = 1;

	ret = ensure_commit_roots_uptodate(sctx);
	if (ret)
		goto out;

	current->journal_info = BTRFS_SEND_TRANS_STUB;
	ret = send_subvol(sctx);
	current->journal_info = NULL;
	if (ret < 0)
		goto out;

	if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_END_CMD)) {
		ret = begin_cmd(sctx, BTRFS_SEND_C_END);
		if (ret < 0)
			goto out;
		ret = send_cmd(sctx);
		if (ret < 0)
			goto out;
	}

out:
	WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->pending_dir_moves));
	while (sctx && !RB_EMPTY_ROOT(&sctx->pending_dir_moves)) {
		struct rb_node *n;
		struct pending_dir_move *pm;

		n = rb_first(&sctx->pending_dir_moves);
		pm = rb_entry(n, struct pending_dir_move, node);
		while (!list_empty(&pm->list)) {
			struct pending_dir_move *pm2;

			pm2 = list_first_entry(&pm->list,
					       struct pending_dir_move, list);
			free_pending_move(sctx, pm2);
		}
		free_pending_move(sctx, pm);
	}

	WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves));
	while (sctx && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves)) {
		struct rb_node *n;
		struct waiting_dir_move *dm;

		n = rb_first(&sctx->waiting_dir_moves);
		dm = rb_entry(n, struct waiting_dir_move, node);
		rb_erase(&dm->node, &sctx->waiting_dir_moves);
		kfree(dm);
	}

	WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->orphan_dirs));
	while (sctx && !RB_EMPTY_ROOT(&sctx->orphan_dirs)) {
		struct rb_node *n;
		struct orphan_dir_info *odi;

		n = rb_first(&sctx->orphan_dirs);
		odi = rb_entry(n, struct orphan_dir_info, node);
		free_orphan_dir_info(sctx, odi);
	}

	if (sort_clone_roots) {
		for (i = 0; i < sctx->clone_roots_cnt; i++)
			btrfs_root_dec_send_in_progress(
					sctx->clone_roots[i].root);
	} else {
		for (i = 0; sctx && i < clone_sources_to_rollback; i++)
			btrfs_root_dec_send_in_progress(
					sctx->clone_roots[i].root);

		btrfs_root_dec_send_in_progress(send_root);
	}
	if (sctx && !IS_ERR_OR_NULL(sctx->parent_root))
		btrfs_root_dec_send_in_progress(sctx->parent_root);

	kvfree(clone_sources_tmp);

	if (sctx) {
		if (sctx->send_filp)
			fput(sctx->send_filp);

		kvfree(sctx->clone_roots);
		kvfree(sctx->send_buf);
		kvfree(sctx->read_buf);

		name_cache_free(sctx);

		kfree(sctx);
	}

	return ret;
}
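
/*
 * Illustrative userspace sketch (not part of this file) showing how the
 * ioctl implemented above is driven; error handling is trimmed and the
 * helper name is made up. A full send of a read-only subvolume streams the
 * commands into send_fd; an incremental send would additionally set
 * args.parent_root to the root id of the parent snapshot. Kept under #if 0
 * so it never compiles into the kernel.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/btrfs.h>

static int sketch_full_send(const char *subvol_path, int send_fd)
{
	struct btrfs_ioctl_send_args args = {};
	int fd = open(subvol_path, O_RDONLY);
	int ret;

	if (fd < 0)
		return -1;
	args.send_fd = send_fd;		/* stream is written to this fd */
	args.parent_root = 0;		/* 0 means a full (non-incremental) send */
	args.clone_sources_count = 0;	/* no extra clone sources */
	ret = ioctl(fd, BTRFS_IOC_SEND, &args);
	close(fd);
	return ret;
}
#endif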