// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 Alexander Block. All rights reserved.
 */

#include <linux/bsearch.h>
#include <linux/file.h>
#include <linux/sort.h>
#include <linux/mount.h>
#include <linux/xattr.h>
#include <linux/posix_acl_xattr.h>
#include <linux/radix-tree.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/compat.h>
#include <linux/crc32c.h>

#include "btrfs_inode.h"
#include "transaction.h"
#include "compression.h"
/*
 * A fs_path is a helper to dynamically build path names with unknown size.
 * It reallocates the internal buffer on demand.
 * It allows fast adding of path elements on the right side (normal path) and
 * fast adding to the left side (reversed path). A reversed path can also be
 * unreversed if needed.
 */
struct fs_path {
	union {
		struct {
			char *start;
			char *end;

			char *buf;
			unsigned short buf_len:15;
			unsigned short reversed:1;
			char inline_buf[];
		};
		/*
		 * Average path length does not exceed 200 bytes, we'll have
		 * better packing in the slab and higher chance to satisfy
		 * an allocation later during send.
		 */
		char pad[256];
	};
};
#define FS_PATH_INLINE_SIZE \
	(sizeof(struct fs_path) - offsetof(struct fs_path, inline_buf))
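
/*
 * Illustrative usage sketch (not part of the original code): a typical caller
 * builds a path with the helpers defined below, e.g.
 *
 *	struct fs_path *p = fs_path_alloc();
 *
 *	if (!p)
 *		return -ENOMEM;
 *	ret = fs_path_add(p, "dir", 3);
 *	if (!ret)
 *		ret = fs_path_add(p, "file", 4);
 *	// p->start now points to "dir/file", p->end to its trailing NUL
 *	fs_path_free(p);
 *
 * fs_path_prepare_for_add() inserts the '/' separators automatically whenever
 * the path is not empty.
 */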
/* reused for each extent */
struct clone_root {
	struct btrfs_root *root;
	u64 ino;
	u64 offset;
	u64 found_refs;
};

#define SEND_CTX_MAX_NAME_CACHE_SIZE 128
#define SEND_CTX_NAME_CACHE_CLEAN_SIZE (SEND_CTX_MAX_NAME_CACHE_SIZE * 2)
struct send_ctx {
	struct file *send_filp;
	loff_t send_off;
	char *send_buf;
	u32 send_size;
	u32 send_max_size;
	u64 total_send_size;
	u64 cmd_send_size[BTRFS_SEND_C_MAX + 1];
	u64 flags;	/* 'flags' member of btrfs_ioctl_send_args is u64 */

	struct btrfs_root *send_root;
	struct btrfs_root *parent_root;
	struct clone_root *clone_roots;
	int clone_roots_cnt;
	/* current state of the compare_tree call */
	struct btrfs_path *left_path;
	struct btrfs_path *right_path;
	struct btrfs_key *cmp_key;

	/*
	 * infos of the currently processed inode. In case of deleted inodes,
	 * these are the values from the deleted inode.
	 */
	u64 cur_ino;
	u64 cur_inode_gen;
	int cur_inode_new;
	int cur_inode_new_gen;
	int cur_inode_deleted;
	u64 cur_inode_size;
	u64 cur_inode_mode;
	u64 cur_inode_rdev;
	u64 cur_inode_last_extent;
	u64 cur_inode_next_write_offset;

	u64 send_progress;

	struct list_head new_refs;
	struct list_head deleted_refs;

	struct radix_tree_root name_cache;
	struct list_head name_cache_list;
	int name_cache_size;

	struct file_ra_state ra;
	/*
	 * We process inodes by their increasing order, so if before an
	 * incremental send we reverse the parent/child relationship of
	 * directories such that a directory with a lower inode number was
	 * the parent of a directory with a higher inode number, and the one
	 * becoming the new parent got renamed too, we can't rename/move the
	 * directory with lower inode number when we finish processing it - we
	 * must process the directory with higher inode number first, then
	 * rename/move it and then rename/move the directory with lower inode
	 * number. Example follows.
	 *
	 * Tree state when the first send was performed:
	 *
	 * Tree state when the second (incremental) send is performed:
	 *
	 * The sequence of steps that lead to the second state was:
	 *
	 * mv /a/b/c/d /a/b/c2/d2
	 * mv /a/b/c /a/b/c2/d2/cc
	 *
	 * "c" has lower inode number, but we can't move it (2nd mv operation)
	 * before we move "d", which has higher inode number.
	 *
	 * So we just memorize which move/rename operations must be performed
	 * later when their respective parent is processed and moved/renamed.
	 */

	/* Indexed by parent directory inode number. */
	struct rb_root pending_dir_moves;

	/*
	 * Reverse index, indexed by the inode number of a directory that
	 * is waiting for the move/rename of its immediate parent before its
	 * own move/rename can be performed.
	 */
	struct rb_root waiting_dir_moves;

	/*
	 * A directory that is going to be rm'ed might have a child directory
	 * which is in the pending directory moves index above. In this case,
	 * the directory can only be removed after the move/rename of its child
	 * is performed. Example:
	 *
	 * Sequence of steps that lead to the send snapshot:
	 * rm -f /a/b/c/foo.txt
	 * mv /a/b/c/x /a/b/YY
	 *
	 * When the child is processed, its move/rename is delayed until its
	 * parent is processed (as explained above), but all other operations
	 * like update utimes, chown, chgrp, etc, are performed and the paths
	 * that it uses for those operations must use the orphanized name of
	 * its parent (the directory we're going to rm later), so we need to
	 * memorize that name.
	 *
	 * Indexed by the inode number of the directory to be deleted.
	 */
	struct rb_root orphan_dirs;
};
struct pending_dir_move {
	struct rb_node node;
	struct list_head list;
	u64 parent_ino;
	u64 ino;
	u64 gen;
	struct list_head update_refs;
};

struct waiting_dir_move {
	struct rb_node node;
	u64 ino;
	/*
	 * There might be some directory that could not be removed because it
	 * was waiting for this directory inode to be moved first. Therefore
	 * after this directory is moved, we can try to rmdir the ino rmdir_ino.
	 */
	u64 rmdir_ino;
	bool orphanized;
};

struct orphan_dir_info {
	struct rb_node node;
	u64 ino;
	u64 gen;
};

struct name_cache_entry {
	struct list_head list;
	/*
	 * radix_tree has only 32bit entries but we need to handle 64bit inums.
	 * We use the lower 32bit of the 64bit inum to store it in the tree. If
	 * more than one inum would fall into the same entry, we use radix_list
	 * to store the additional entries. radix_list is also used to store
	 * entries where two entries have the same inum but different
	 * generations.
	 */
	struct list_head radix_list;
	u64 ino;
	u64 gen;
	u64 parent_ino;
	u64 parent_gen;
	int ret;
	int need_later_update;
	int name_len;
	char name[];
};
static void inconsistent_snapshot_error(struct send_ctx *sctx,
					enum btrfs_compare_tree_result result,
					const char *what)
{
	const char *result_string;

	switch (result) {
	case BTRFS_COMPARE_TREE_NEW:
		result_string = "new";
		break;
	case BTRFS_COMPARE_TREE_DELETED:
		result_string = "deleted";
		break;
	case BTRFS_COMPARE_TREE_CHANGED:
		result_string = "updated";
		break;
	case BTRFS_COMPARE_TREE_SAME:
		ASSERT(0);
		result_string = "unchanged";
		break;
	default:
		ASSERT(0);
		result_string = "unexpected";
	}

	btrfs_err(sctx->send_root->fs_info,
		  "Send: inconsistent snapshot, found %s %s for inode %llu without updated inode item, send root is %llu, parent root is %llu",
		  result_string, what, sctx->cmp_key->objectid,
		  sctx->send_root->root_key.objectid,
		  (sctx->parent_root ?
		   sctx->parent_root->root_key.objectid : 0));
}
static int is_waiting_for_move(struct send_ctx *sctx, u64 ino);

static struct waiting_dir_move *
get_waiting_dir_move(struct send_ctx *sctx, u64 ino);

static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino);

static int need_send_hole(struct send_ctx *sctx)
{
	return (sctx->parent_root && !sctx->cur_inode_new &&
		!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted &&
		S_ISREG(sctx->cur_inode_mode));
}
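
/*
 * Illustrative note (an assumption, not from the original source): holes only
 * need to be transmitted for regular files that already exist in the parent
 * snapshot with the same generation, e.g. during an incremental send of a
 * file whose middle extents were deallocated. Newly created, re-created or
 * deleted inodes start from scratch on the receiving side, so nothing needs
 * to be punched out for them.
 */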
static void fs_path_reset(struct fs_path *p)
{
	if (p->reversed) {
		p->start = p->buf + p->buf_len - 1;
		p->end = p->start;
		*p->start = 0;
	} else {
		p->start = p->buf;
		p->end = p->start;
		*p->start = 0;
	}
}

static struct fs_path *fs_path_alloc(void)
{
	struct fs_path *p;

	p = kmalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return NULL;
	p->reversed = 0;
	p->buf = p->inline_buf;
	p->buf_len = FS_PATH_INLINE_SIZE;
	fs_path_reset(p);
	return p;
}

static struct fs_path *fs_path_alloc_reversed(void)
{
	struct fs_path *p;

	p = fs_path_alloc();
	if (!p)
		return NULL;
	p->reversed = 1;
	fs_path_reset(p);
	return p;
}

static void fs_path_free(struct fs_path *p)
{
	if (!p)
		return;
	if (p->buf != p->inline_buf)
		kfree(p->buf);
	kfree(p);
}

static int fs_path_len(struct fs_path *p)
{
	return p->end - p->start;
}
static int fs_path_ensure_buf(struct fs_path *p, int len)
{
	char *tmp_buf;
	int path_len;
	int old_buf_len;

	len++;

	if (p->buf_len >= len)
		return 0;

	if (len > PATH_MAX) {
		WARN_ON(1);
		return -ENOMEM;
	}

	path_len = p->end - p->start;
	old_buf_len = p->buf_len;

	/*
	 * First time the inline_buf does not suffice
	 */
	if (p->buf == p->inline_buf) {
		tmp_buf = kmalloc(len, GFP_KERNEL);
		if (tmp_buf)
			memcpy(tmp_buf, p->buf, old_buf_len);
	} else {
		tmp_buf = krealloc(p->buf, len, GFP_KERNEL);
	}
	if (!tmp_buf)
		return -ENOMEM;
	p->buf = tmp_buf;
	/*
	 * The real size of the buffer is bigger, this will let the fast path
	 * happen most of the time
	 */
	p->buf_len = ksize(p->buf);

	if (p->reversed) {
		tmp_buf = p->buf + old_buf_len - path_len - 1;
		p->end = p->buf + p->buf_len - 1;
		p->start = p->end - path_len;
		memmove(p->start, tmp_buf, path_len + 1);
	} else {
		p->start = p->buf;
		p->end = p->start + path_len;
	}
	return 0;
}
static int fs_path_prepare_for_add(struct fs_path *p, int name_len,
				   char **prepared)
{
	int ret;
	int new_len;

	new_len = p->end - p->start + name_len;
	if (p->start != p->end)
		new_len++;
	ret = fs_path_ensure_buf(p, new_len);
	if (ret < 0)
		goto out;

	if (p->reversed) {
		if (p->start != p->end)
			*--p->start = '/';
		p->start -= name_len;
		*prepared = p->start;
	} else {
		if (p->start != p->end)
			*p->end++ = '/';
		*prepared = p->end;
		p->end += name_len;
		*p->end = 0;
	}

out:
	return ret;
}

static int fs_path_add(struct fs_path *p, const char *name, int name_len)
{
	int ret;
	char *prepared;

	ret = fs_path_prepare_for_add(p, name_len, &prepared);
	if (ret < 0)
		goto out;
	memcpy(prepared, name, name_len);

out:
	return ret;
}
static int fs_path_add_path(struct fs_path *p, struct fs_path *p2)
{
	int ret;
	char *prepared;

	ret = fs_path_prepare_for_add(p, p2->end - p2->start, &prepared);
	if (ret < 0)
		goto out;
	memcpy(prepared, p2->start, p2->end - p2->start);

out:
	return ret;
}

static int fs_path_add_from_extent_buffer(struct fs_path *p,
					   struct extent_buffer *eb,
					   unsigned long off, int len)
{
	int ret;
	char *prepared;

	ret = fs_path_prepare_for_add(p, len, &prepared);
	if (ret < 0)
		goto out;

	read_extent_buffer(eb, prepared, off, len);

out:
	return ret;
}

static int fs_path_copy(struct fs_path *p, struct fs_path *from)
{
	int ret;

	p->reversed = from->reversed;
	fs_path_reset(p);

	ret = fs_path_add_path(p, from);

	return ret;
}
static void fs_path_unreverse(struct fs_path *p)
{
	char *tmp;
	int len;

	if (!p->reversed)
		return;

	tmp = p->start;
	len = p->end - p->start;
	p->start = p->buf;
	p->end = p->start + len;
	memmove(p->start, tmp, len + 1);

	p->reversed = 0;
}
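
/*
 * Illustrative sketch (not part of the original code): a reversed fs_path is
 * built right to left, which is what get_cur_path() relies on when it walks
 * from an inode up to the subvolume root. Adding "file" and then "dir" to a
 * reversed path leaves the buffer laid out as
 *
 *	[ .... d i r / f i l e \0 ]
 *	       ^start          ^end
 *
 * fs_path_unreverse() then just moves that region to the front of the buffer
 * so the result can be handed out as an ordinary string.
 */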
static struct btrfs_path *alloc_path_for_send(void)
{
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return NULL;
	path->search_commit_root = 1;
	path->skip_locking = 1;
	path->need_commit_sem = 1;
	return path;
}

static int write_buf(struct file *filp, const void *buf, u32 len, loff_t *off)
{
	int ret;
	u32 pos = 0;

	while (pos < len) {
		ret = kernel_write(filp, buf + pos, len - pos, off);
		/* TODO handle that correctly */
		/*if (ret == -ERESTARTSYS) {
			continue;
		}*/
		if (ret < 0)
			return ret;
		if (ret == 0) {
			return -EIO;
		}
		pos += ret;
	}

	return 0;
}
static int tlv_put(struct send_ctx *sctx, u16 attr, const void *data, int len)
{
	struct btrfs_tlv_header *hdr;
	int total_len = sizeof(*hdr) + len;
	int left = sctx->send_max_size - sctx->send_size;

	if (unlikely(left < total_len))
		return -EOVERFLOW;

	hdr = (struct btrfs_tlv_header *) (sctx->send_buf + sctx->send_size);
	hdr->tlv_type = cpu_to_le16(attr);
	hdr->tlv_len = cpu_to_le16(len);
	memcpy(hdr + 1, data, len);
	sctx->send_size += total_len;

	return 0;
}
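
/*
 * Illustrative sketch (not part of the original code): each attribute emitted
 * by tlv_put() occupies sizeof(struct btrfs_tlv_header) + len bytes in
 * send_buf, i.e. a little endian type/length header immediately followed by
 * the payload:
 *
 *	+----------+---------+----------------+
 *	| tlv_type | tlv_len | data[tlv_len]  |
 *	|  (le16)  | (le16)  |                |
 *	+----------+---------+----------------+
 *
 * send_cmd() later prefixes a whole sequence of these with a btrfs_cmd_header
 * and a checksum before writing it out.
 */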
#define TLV_PUT_DEFINE_INT(bits) \
	static int tlv_put_u##bits(struct send_ctx *sctx,		\
			u##bits attr, u##bits value)			\
	{								\
		__le##bits __tmp = cpu_to_le##bits(value);		\
		return tlv_put(sctx, attr, &__tmp, sizeof(__tmp));	\
	}

TLV_PUT_DEFINE_INT(64)
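
/*
 * Illustrative note (not part of the original code): the macro above expands,
 * for bits = 64, to
 *
 *	static int tlv_put_u64(struct send_ctx *sctx, u64 attr, u64 value)
 *	{
 *		__le64 __tmp = cpu_to_le64(value);
 *		return tlv_put(sctx, attr, &__tmp, sizeof(__tmp));
 *	}
 *
 * which is the helper used by the TLV_PUT_U64() wrapper below.
 */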
static int tlv_put_string(struct send_ctx *sctx, u16 attr,
			  const char *str, int len)
{
	if (len == -1)
		len = strlen(str);
	return tlv_put(sctx, attr, str, len);
}

static int tlv_put_uuid(struct send_ctx *sctx, u16 attr,
			const u8 *uuid)
{
	return tlv_put(sctx, attr, uuid, BTRFS_UUID_SIZE);
}

static int tlv_put_btrfs_timespec(struct send_ctx *sctx, u16 attr,
				  struct extent_buffer *eb,
				  struct btrfs_timespec *ts)
{
	struct btrfs_timespec bts;

	read_extent_buffer(eb, &bts, (unsigned long)ts, sizeof(bts));
	return tlv_put(sctx, attr, &bts, sizeof(bts));
}
#define TLV_PUT(sctx, attrtype, data, attrlen) \
	do { \
		ret = tlv_put(sctx, attrtype, data, attrlen); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)

#define TLV_PUT_INT(sctx, attrtype, bits, value) \
	do { \
		ret = tlv_put_u##bits(sctx, attrtype, value); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)

#define TLV_PUT_U8(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 8, data)
#define TLV_PUT_U16(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 16, data)
#define TLV_PUT_U32(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 32, data)
#define TLV_PUT_U64(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 64, data)
#define TLV_PUT_STRING(sctx, attrtype, str, len) \
	do { \
		ret = tlv_put_string(sctx, attrtype, str, len); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
#define TLV_PUT_PATH(sctx, attrtype, p) \
	do { \
		ret = tlv_put_string(sctx, attrtype, p->start, \
			p->end - p->start); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
#define TLV_PUT_UUID(sctx, attrtype, uuid) \
	do { \
		ret = tlv_put_uuid(sctx, attrtype, uuid); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
#define TLV_PUT_BTRFS_TIMESPEC(sctx, attrtype, eb, ts) \
	do { \
		ret = tlv_put_btrfs_timespec(sctx, attrtype, eb, ts); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
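
/*
 * Illustrative usage sketch (not part of the original code): a command body
 * is built with these macros between begin_cmd() and send_cmd(), jumping to
 * a local tlv_put_failure label when the send buffer would overflow, e.g.:
 *
 *	ret = begin_cmd(sctx, BTRFS_SEND_C_TRUNCATE);
 *	if (ret < 0)
 *		goto out;
 *	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
 *	TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, size);
 *	ret = send_cmd(sctx);
 *
 * tlv_put_failure:
 * out:
 *	fs_path_free(p);
 */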
static int send_header(struct send_ctx *sctx)
{
	struct btrfs_stream_header hdr;

	strcpy(hdr.magic, BTRFS_SEND_STREAM_MAGIC);
	hdr.version = cpu_to_le32(BTRFS_SEND_STREAM_VERSION);

	return write_buf(sctx->send_filp, &hdr, sizeof(hdr),
					&sctx->send_off);
}

/*
 * For each command/item we want to send to userspace, we call this function.
 */
static int begin_cmd(struct send_ctx *sctx, int cmd)
{
	struct btrfs_cmd_header *hdr;

	if (WARN_ON(!sctx->send_buf))
		return -EINVAL;

	BUG_ON(sctx->send_size);

	sctx->send_size += sizeof(*hdr);
	hdr = (struct btrfs_cmd_header *)sctx->send_buf;
	hdr->cmd = cpu_to_le16(cmd);

	return 0;
}
static int send_cmd(struct send_ctx *sctx)
{
	int ret;
	struct btrfs_cmd_header *hdr;
	u32 crc;

	hdr = (struct btrfs_cmd_header *)sctx->send_buf;
	hdr->len = cpu_to_le32(sctx->send_size - sizeof(*hdr));
	hdr->crc = 0;

	crc = crc32c(0, (unsigned char *)sctx->send_buf, sctx->send_size);
	hdr->crc = cpu_to_le32(crc);

	ret = write_buf(sctx->send_filp, sctx->send_buf, sctx->send_size,
					&sctx->send_off);

	sctx->total_send_size += sctx->send_size;
	sctx->cmd_send_size[le16_to_cpu(hdr->cmd)] += sctx->send_size;
	sctx->send_size = 0;

	return ret;
}
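
/*
 * Illustrative sketch (not part of the original code): the send stream that
 * write_buf() produces therefore looks like
 *
 *	struct btrfs_stream_header   (magic + version, from send_header())
 *	struct btrfs_cmd_header      (len, cmd, crc)   \
 *	TLV, TLV, ...                                   } repeated per command
 *	struct btrfs_cmd_header ...                    /
 *
 * with hdr->crc covering the whole command and computed with the crc field
 * zeroed first.
 */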
701 * Sends a move instruction to user space
703 static int send_rename(struct send_ctx
*sctx
,
704 struct fs_path
*from
, struct fs_path
*to
)
706 struct btrfs_fs_info
*fs_info
= sctx
->send_root
->fs_info
;
709 btrfs_debug(fs_info
, "send_rename %s -> %s", from
->start
, to
->start
);
711 ret
= begin_cmd(sctx
, BTRFS_SEND_C_RENAME
);
715 TLV_PUT_PATH(sctx
, BTRFS_SEND_A_PATH
, from
);
716 TLV_PUT_PATH(sctx
, BTRFS_SEND_A_PATH_TO
, to
);
718 ret
= send_cmd(sctx
);
726 * Sends a link instruction to user space
728 static int send_link(struct send_ctx
*sctx
,
729 struct fs_path
*path
, struct fs_path
*lnk
)
731 struct btrfs_fs_info
*fs_info
= sctx
->send_root
->fs_info
;
734 btrfs_debug(fs_info
, "send_link %s -> %s", path
->start
, lnk
->start
);
736 ret
= begin_cmd(sctx
, BTRFS_SEND_C_LINK
);
740 TLV_PUT_PATH(sctx
, BTRFS_SEND_A_PATH
, path
);
741 TLV_PUT_PATH(sctx
, BTRFS_SEND_A_PATH_LINK
, lnk
);
743 ret
= send_cmd(sctx
);
751 * Sends an unlink instruction to user space
753 static int send_unlink(struct send_ctx
*sctx
, struct fs_path
*path
)
755 struct btrfs_fs_info
*fs_info
= sctx
->send_root
->fs_info
;
758 btrfs_debug(fs_info
, "send_unlink %s", path
->start
);
760 ret
= begin_cmd(sctx
, BTRFS_SEND_C_UNLINK
);
764 TLV_PUT_PATH(sctx
, BTRFS_SEND_A_PATH
, path
);
766 ret
= send_cmd(sctx
);
774 * Sends a rmdir instruction to user space
776 static int send_rmdir(struct send_ctx
*sctx
, struct fs_path
*path
)
778 struct btrfs_fs_info
*fs_info
= sctx
->send_root
->fs_info
;
781 btrfs_debug(fs_info
, "send_rmdir %s", path
->start
);
783 ret
= begin_cmd(sctx
, BTRFS_SEND_C_RMDIR
);
787 TLV_PUT_PATH(sctx
, BTRFS_SEND_A_PATH
, path
);
789 ret
= send_cmd(sctx
);
/*
 * Helper function to retrieve some fields from an inode item.
 */
static int __get_inode_info(struct btrfs_root *root, struct btrfs_path *path,
			  u64 ino, u64 *size, u64 *gen, u64 *mode, u64 *uid,
			  u64 *gid, u64 *rdev)
{
	int ret;
	struct btrfs_inode_item *ii;
	struct btrfs_key key;

	key.objectid = ino;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		return ret;
	}

	ii = btrfs_item_ptr(path->nodes[0], path->slots[0],
			struct btrfs_inode_item);
	if (size)
		*size = btrfs_inode_size(path->nodes[0], ii);
	if (gen)
		*gen = btrfs_inode_generation(path->nodes[0], ii);
	if (mode)
		*mode = btrfs_inode_mode(path->nodes[0], ii);
	if (uid)
		*uid = btrfs_inode_uid(path->nodes[0], ii);
	if (gid)
		*gid = btrfs_inode_gid(path->nodes[0], ii);
	if (rdev)
		*rdev = btrfs_inode_rdev(path->nodes[0], ii);

	return ret;
}

static int get_inode_info(struct btrfs_root *root,
			  u64 ino, u64 *size, u64 *gen,
			  u64 *mode, u64 *uid, u64 *gid,
			  u64 *rdev)
{
	struct btrfs_path *path;
	int ret;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;
	ret = __get_inode_info(root, path, ino, size, gen, mode, uid, gid,
			       rdev);
	btrfs_free_path(path);
	return ret;
}
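
/*
 * Illustrative usage sketch (an assumption, not taken from the original
 * code): callers that only care about some of the fields pass NULL for the
 * rest, e.g. fetching just the generation of an inode:
 *
 *	u64 gen;
 *
 *	ret = get_inode_info(sctx->send_root, ino, NULL, &gen,
 *			     NULL, NULL, NULL, NULL);
 *	if (ret < 0)
 *		return ret;
 */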
852 typedef int (*iterate_inode_ref_t
)(int num
, u64 dir
, int index
,
857 * Helper function to iterate the entries in ONE btrfs_inode_ref or
858 * btrfs_inode_extref.
859 * The iterate callback may return a non zero value to stop iteration. This can
860 * be a negative value for error codes or 1 to simply stop it.
862 * path must point to the INODE_REF or INODE_EXTREF when called.
864 static int iterate_inode_ref(struct btrfs_root
*root
, struct btrfs_path
*path
,
865 struct btrfs_key
*found_key
, int resolve
,
866 iterate_inode_ref_t iterate
, void *ctx
)
868 struct extent_buffer
*eb
= path
->nodes
[0];
869 struct btrfs_item
*item
;
870 struct btrfs_inode_ref
*iref
;
871 struct btrfs_inode_extref
*extref
;
872 struct btrfs_path
*tmp_path
;
876 int slot
= path
->slots
[0];
883 unsigned long name_off
;
884 unsigned long elem_size
;
887 p
= fs_path_alloc_reversed();
891 tmp_path
= alloc_path_for_send();
898 if (found_key
->type
== BTRFS_INODE_REF_KEY
) {
899 ptr
= (unsigned long)btrfs_item_ptr(eb
, slot
,
900 struct btrfs_inode_ref
);
901 item
= btrfs_item_nr(slot
);
902 total
= btrfs_item_size(eb
, item
);
903 elem_size
= sizeof(*iref
);
905 ptr
= btrfs_item_ptr_offset(eb
, slot
);
906 total
= btrfs_item_size_nr(eb
, slot
);
907 elem_size
= sizeof(*extref
);
910 while (cur
< total
) {
913 if (found_key
->type
== BTRFS_INODE_REF_KEY
) {
914 iref
= (struct btrfs_inode_ref
*)(ptr
+ cur
);
915 name_len
= btrfs_inode_ref_name_len(eb
, iref
);
916 name_off
= (unsigned long)(iref
+ 1);
917 index
= btrfs_inode_ref_index(eb
, iref
);
918 dir
= found_key
->offset
;
920 extref
= (struct btrfs_inode_extref
*)(ptr
+ cur
);
921 name_len
= btrfs_inode_extref_name_len(eb
, extref
);
922 name_off
= (unsigned long)&extref
->name
;
923 index
= btrfs_inode_extref_index(eb
, extref
);
924 dir
= btrfs_inode_extref_parent(eb
, extref
);
928 start
= btrfs_ref_to_path(root
, tmp_path
, name_len
,
932 ret
= PTR_ERR(start
);
935 if (start
< p
->buf
) {
/* overflow, try again with larger buffer */
937 ret
= fs_path_ensure_buf(p
,
938 p
->buf_len
+ p
->buf
- start
);
941 start
= btrfs_ref_to_path(root
, tmp_path
,
946 ret
= PTR_ERR(start
);
949 BUG_ON(start
< p
->buf
);
953 ret
= fs_path_add_from_extent_buffer(p
, eb
, name_off
,
959 cur
+= elem_size
+ name_len
;
960 ret
= iterate(num
, dir
, index
, p
, ctx
);
967 btrfs_free_path(tmp_path
);
972 typedef int (*iterate_dir_item_t
)(int num
, struct btrfs_key
*di_key
,
973 const char *name
, int name_len
,
974 const char *data
, int data_len
,
978 * Helper function to iterate the entries in ONE btrfs_dir_item.
979 * The iterate callback may return a non zero value to stop iteration. This can
980 * be a negative value for error codes or 1 to simply stop it.
982 * path must point to the dir item when called.
984 static int iterate_dir_item(struct btrfs_root
*root
, struct btrfs_path
*path
,
985 iterate_dir_item_t iterate
, void *ctx
)
988 struct extent_buffer
*eb
;
989 struct btrfs_item
*item
;
990 struct btrfs_dir_item
*di
;
991 struct btrfs_key di_key
;
1004 * Start with a small buffer (1 page). If later we end up needing more
1005 * space, which can happen for xattrs on a fs with a leaf size greater
1006 * then the page size, attempt to increase the buffer. Typically xattr
1010 buf
= kmalloc(buf_len
, GFP_KERNEL
);
1016 eb
= path
->nodes
[0];
1017 slot
= path
->slots
[0];
1018 item
= btrfs_item_nr(slot
);
1019 di
= btrfs_item_ptr(eb
, slot
, struct btrfs_dir_item
);
1022 total
= btrfs_item_size(eb
, item
);
1025 while (cur
< total
) {
1026 name_len
= btrfs_dir_name_len(eb
, di
);
1027 data_len
= btrfs_dir_data_len(eb
, di
);
1028 type
= btrfs_dir_type(eb
, di
);
1029 btrfs_dir_item_key_to_cpu(eb
, di
, &di_key
);
1031 if (type
== BTRFS_FT_XATTR
) {
1032 if (name_len
> XATTR_NAME_MAX
) {
1033 ret
= -ENAMETOOLONG
;
1036 if (name_len
+ data_len
>
1037 BTRFS_MAX_XATTR_SIZE(root
->fs_info
)) {
1045 if (name_len
+ data_len
> PATH_MAX
) {
1046 ret
= -ENAMETOOLONG
;
1051 if (name_len
+ data_len
> buf_len
) {
1052 buf_len
= name_len
+ data_len
;
1053 if (is_vmalloc_addr(buf
)) {
1057 char *tmp
= krealloc(buf
, buf_len
,
1058 GFP_KERNEL
| __GFP_NOWARN
);
1065 buf
= kvmalloc(buf_len
, GFP_KERNEL
);
1073 read_extent_buffer(eb
, buf
, (unsigned long)(di
+ 1),
1074 name_len
+ data_len
);
1076 len
= sizeof(*di
) + name_len
+ data_len
;
1077 di
= (struct btrfs_dir_item
*)((char *)di
+ len
);
1080 ret
= iterate(num
, &di_key
, buf
, name_len
, buf
+ name_len
,
1081 data_len
, type
, ctx
);
1097 static int __copy_first_ref(int num
, u64 dir
, int index
,
1098 struct fs_path
*p
, void *ctx
)
1101 struct fs_path
*pt
= ctx
;
1103 ret
= fs_path_copy(pt
, p
);
1107 /* we want the first only */
 * Retrieve the first path of an inode. If an inode has more than one
1113 * ref/hardlink, this is ignored.
1115 static int get_inode_path(struct btrfs_root
*root
,
1116 u64 ino
, struct fs_path
*path
)
1119 struct btrfs_key key
, found_key
;
1120 struct btrfs_path
*p
;
1122 p
= alloc_path_for_send();
1126 fs_path_reset(path
);
1129 key
.type
= BTRFS_INODE_REF_KEY
;
1132 ret
= btrfs_search_slot_for_read(root
, &key
, p
, 1, 0);
1139 btrfs_item_key_to_cpu(p
->nodes
[0], &found_key
, p
->slots
[0]);
1140 if (found_key
.objectid
!= ino
||
1141 (found_key
.type
!= BTRFS_INODE_REF_KEY
&&
1142 found_key
.type
!= BTRFS_INODE_EXTREF_KEY
)) {
1147 ret
= iterate_inode_ref(root
, p
, &found_key
, 1,
1148 __copy_first_ref
, path
);
1158 struct backref_ctx
{
1159 struct send_ctx
*sctx
;
1161 struct btrfs_path
*path
;
1162 /* number of total found references */
1166 * used for clones found in send_root. clones found behind cur_objectid
1167 * and cur_offset are not considered as allowed clones.
1172 /* may be truncated in case it's the last extent in a file */
1175 /* data offset in the file extent item */
1178 /* Just to check for bugs in backref resolving */
1182 static int __clone_root_cmp_bsearch(const void *key
, const void *elt
)
1184 u64 root
= (u64
)(uintptr_t)key
;
1185 struct clone_root
*cr
= (struct clone_root
*)elt
;
1187 if (root
< cr
->root
->objectid
)
1189 if (root
> cr
->root
->objectid
)
1194 static int __clone_root_cmp_sort(const void *e1
, const void *e2
)
1196 struct clone_root
*cr1
= (struct clone_root
*)e1
;
1197 struct clone_root
*cr2
= (struct clone_root
*)e2
;
1199 if (cr1
->root
->objectid
< cr2
->root
->objectid
)
1201 if (cr1
->root
->objectid
> cr2
->root
->objectid
)
1207 * Called for every backref that is found for the current extent.
1208 * Results are collected in sctx->clone_roots->ino/offset/found_refs
1210 static int __iterate_backrefs(u64 ino
, u64 offset
, u64 root
, void *ctx_
)
1212 struct backref_ctx
*bctx
= ctx_
;
1213 struct clone_root
*found
;
1217 /* First check if the root is in the list of accepted clone sources */
1218 found
= bsearch((void *)(uintptr_t)root
, bctx
->sctx
->clone_roots
,
1219 bctx
->sctx
->clone_roots_cnt
,
1220 sizeof(struct clone_root
),
1221 __clone_root_cmp_bsearch
);
1225 if (found
->root
== bctx
->sctx
->send_root
&&
1226 ino
== bctx
->cur_objectid
&&
1227 offset
== bctx
->cur_offset
) {
1228 bctx
->found_itself
= 1;
 * There are inodes that have extents that lie behind their i_size. Don't
1233 * accept clones from these extents.
1235 ret
= __get_inode_info(found
->root
, bctx
->path
, ino
, &i_size
, NULL
, NULL
,
1237 btrfs_release_path(bctx
->path
);
1241 if (offset
+ bctx
->data_offset
+ bctx
->extent_len
> i_size
)
1245 * Make sure we don't consider clones from send_root that are
1246 * behind the current inode/offset.
1248 if (found
->root
== bctx
->sctx
->send_root
) {
1250 * TODO for the moment we don't accept clones from the inode
1251 * that is currently send. We may change this when
1252 * BTRFS_IOC_CLONE_RANGE supports cloning from and to the same
1255 if (ino
>= bctx
->cur_objectid
)
1260 found
->found_refs
++;
1261 if (ino
< found
->ino
) {
1263 found
->offset
= offset
;
1264 } else if (found
->ino
== ino
) {
 * same extent found more than once in the same file.
1268 if (found
->offset
> offset
+ bctx
->extent_len
)
1269 found
->offset
= offset
;
1276 * Given an inode, offset and extent item, it finds a good clone for a clone
1277 * instruction. Returns -ENOENT when none could be found. The function makes
1278 * sure that the returned clone is usable at the point where sending is at the
1279 * moment. This means, that no clones are accepted which lie behind the current
1282 * path must point to the extent item when called.
1284 static int find_extent_clone(struct send_ctx
*sctx
,
1285 struct btrfs_path
*path
,
1286 u64 ino
, u64 data_offset
,
1288 struct clone_root
**found
)
1290 struct btrfs_fs_info
*fs_info
= sctx
->send_root
->fs_info
;
1296 u64 extent_item_pos
;
1298 struct btrfs_file_extent_item
*fi
;
1299 struct extent_buffer
*eb
= path
->nodes
[0];
1300 struct backref_ctx
*backref_ctx
= NULL
;
1301 struct clone_root
*cur_clone_root
;
1302 struct btrfs_key found_key
;
1303 struct btrfs_path
*tmp_path
;
1307 tmp_path
= alloc_path_for_send();
1311 /* We only use this path under the commit sem */
1312 tmp_path
->need_commit_sem
= 0;
1314 backref_ctx
= kmalloc(sizeof(*backref_ctx
), GFP_KERNEL
);
1320 backref_ctx
->path
= tmp_path
;
1322 if (data_offset
>= ino_size
) {
1324 * There may be extents that lie behind the file's size.
1325 * I at least had this in combination with snapshotting while
1326 * writing large files.
1332 fi
= btrfs_item_ptr(eb
, path
->slots
[0],
1333 struct btrfs_file_extent_item
);
1334 extent_type
= btrfs_file_extent_type(eb
, fi
);
1335 if (extent_type
== BTRFS_FILE_EXTENT_INLINE
) {
1339 compressed
= btrfs_file_extent_compression(eb
, fi
);
1341 num_bytes
= btrfs_file_extent_num_bytes(eb
, fi
);
1342 disk_byte
= btrfs_file_extent_disk_bytenr(eb
, fi
);
1343 if (disk_byte
== 0) {
1347 logical
= disk_byte
+ btrfs_file_extent_offset(eb
, fi
);
1349 down_read(&fs_info
->commit_root_sem
);
1350 ret
= extent_from_logical(fs_info
, disk_byte
, tmp_path
,
1351 &found_key
, &flags
);
1352 up_read(&fs_info
->commit_root_sem
);
1353 btrfs_release_path(tmp_path
);
1357 if (flags
& BTRFS_EXTENT_FLAG_TREE_BLOCK
) {
1363 * Setup the clone roots.
1365 for (i
= 0; i
< sctx
->clone_roots_cnt
; i
++) {
1366 cur_clone_root
= sctx
->clone_roots
+ i
;
1367 cur_clone_root
->ino
= (u64
)-1;
1368 cur_clone_root
->offset
= 0;
1369 cur_clone_root
->found_refs
= 0;
1372 backref_ctx
->sctx
= sctx
;
1373 backref_ctx
->found
= 0;
1374 backref_ctx
->cur_objectid
= ino
;
1375 backref_ctx
->cur_offset
= data_offset
;
1376 backref_ctx
->found_itself
= 0;
1377 backref_ctx
->extent_len
= num_bytes
;
1379 * For non-compressed extents iterate_extent_inodes() gives us extent
1380 * offsets that already take into account the data offset, but not for
1381 * compressed extents, since the offset is logical and not relative to
1382 * the physical extent locations. We must take this into account to
1383 * avoid sending clone offsets that go beyond the source file's size,
1384 * which would result in the clone ioctl failing with -EINVAL on the
1387 if (compressed
== BTRFS_COMPRESS_NONE
)
1388 backref_ctx
->data_offset
= 0;
1390 backref_ctx
->data_offset
= btrfs_file_extent_offset(eb
, fi
);
1393 * The last extent of a file may be too large due to page alignment.
1394 * We need to adjust extent_len in this case so that the checks in
1395 * __iterate_backrefs work.
1397 if (data_offset
+ num_bytes
>= ino_size
)
1398 backref_ctx
->extent_len
= ino_size
- data_offset
;
1401 * Now collect all backrefs.
1403 if (compressed
== BTRFS_COMPRESS_NONE
)
1404 extent_item_pos
= logical
- found_key
.objectid
;
1406 extent_item_pos
= 0;
1407 ret
= iterate_extent_inodes(fs_info
, found_key
.objectid
,
1408 extent_item_pos
, 1, __iterate_backrefs
,
1409 backref_ctx
, false);
1414 if (!backref_ctx
->found_itself
) {
1415 /* found a bug in backref code? */
1418 "did not find backref in send_root. inode=%llu, offset=%llu, disk_byte=%llu found extent=%llu",
1419 ino
, data_offset
, disk_byte
, found_key
.objectid
);
1423 btrfs_debug(fs_info
,
1424 "find_extent_clone: data_offset=%llu, ino=%llu, num_bytes=%llu, logical=%llu",
1425 data_offset
, ino
, num_bytes
, logical
);
1427 if (!backref_ctx
->found
)
1428 btrfs_debug(fs_info
, "no clones found");
1430 cur_clone_root
= NULL
;
1431 for (i
= 0; i
< sctx
->clone_roots_cnt
; i
++) {
1432 if (sctx
->clone_roots
[i
].found_refs
) {
1433 if (!cur_clone_root
)
1434 cur_clone_root
= sctx
->clone_roots
+ i
;
1435 else if (sctx
->clone_roots
[i
].root
== sctx
->send_root
)
1436 /* prefer clones from send_root over others */
1437 cur_clone_root
= sctx
->clone_roots
+ i
;
1442 if (cur_clone_root
) {
1443 *found
= cur_clone_root
;
1450 btrfs_free_path(tmp_path
);
1455 static int read_symlink(struct btrfs_root
*root
,
1457 struct fs_path
*dest
)
1460 struct btrfs_path
*path
;
1461 struct btrfs_key key
;
1462 struct btrfs_file_extent_item
*ei
;
1468 path
= alloc_path_for_send();
1473 key
.type
= BTRFS_EXTENT_DATA_KEY
;
1475 ret
= btrfs_search_slot(NULL
, root
, &key
, path
, 0, 0);
1480 * An empty symlink inode. Can happen in rare error paths when
1481 * creating a symlink (transaction committed before the inode
1482 * eviction handler removed the symlink inode items and a crash
1483 * happened in between or the subvol was snapshoted in between).
1484 * Print an informative message to dmesg/syslog so that the user
1485 * can delete the symlink.
1487 btrfs_err(root
->fs_info
,
1488 "Found empty symlink inode %llu at root %llu",
1489 ino
, root
->root_key
.objectid
);
1494 ei
= btrfs_item_ptr(path
->nodes
[0], path
->slots
[0],
1495 struct btrfs_file_extent_item
);
1496 type
= btrfs_file_extent_type(path
->nodes
[0], ei
);
1497 compression
= btrfs_file_extent_compression(path
->nodes
[0], ei
);
1498 BUG_ON(type
!= BTRFS_FILE_EXTENT_INLINE
);
1499 BUG_ON(compression
);
1501 off
= btrfs_file_extent_inline_start(ei
);
1502 len
= btrfs_file_extent_inline_len(path
->nodes
[0], path
->slots
[0], ei
);
1504 ret
= fs_path_add_from_extent_buffer(dest
, path
->nodes
[0], off
, len
);
1507 btrfs_free_path(path
);
1512 * Helper function to generate a file name that is unique in the root of
1513 * send_root and parent_root. This is used to generate names for orphan inodes.
1515 static int gen_unique_name(struct send_ctx
*sctx
,
1517 struct fs_path
*dest
)
1520 struct btrfs_path
*path
;
1521 struct btrfs_dir_item
*di
;
1526 path
= alloc_path_for_send();
1531 len
= snprintf(tmp
, sizeof(tmp
), "o%llu-%llu-%llu",
1533 ASSERT(len
< sizeof(tmp
));
1535 di
= btrfs_lookup_dir_item(NULL
, sctx
->send_root
,
1536 path
, BTRFS_FIRST_FREE_OBJECTID
,
1537 tmp
, strlen(tmp
), 0);
1538 btrfs_release_path(path
);
1544 /* not unique, try again */
1549 if (!sctx
->parent_root
) {
1555 di
= btrfs_lookup_dir_item(NULL
, sctx
->parent_root
,
1556 path
, BTRFS_FIRST_FREE_OBJECTID
,
1557 tmp
, strlen(tmp
), 0);
1558 btrfs_release_path(path
);
1564 /* not unique, try again */
1572 ret
= fs_path_add(dest
, tmp
, strlen(tmp
));
1575 btrfs_free_path(path
);
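
/*
 * Illustrative sketch (not part of the original code): the unique names
 * generated by gen_unique_name() look like "o<ino>-<gen>-<idx>", e.g. for
 * inode 257 with generation 7 the candidates tried are
 *
 *	o257-7-0, o257-7-1, o257-7-2, ...
 *
 * until one is found that exists neither in send_root nor in parent_root.
 */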
1580 inode_state_no_change
,
1581 inode_state_will_create
,
1582 inode_state_did_create
,
1583 inode_state_will_delete
,
1584 inode_state_did_delete
,
1587 static int get_cur_inode_state(struct send_ctx
*sctx
, u64 ino
, u64 gen
)
1595 ret
= get_inode_info(sctx
->send_root
, ino
, NULL
, &left_gen
, NULL
, NULL
,
1597 if (ret
< 0 && ret
!= -ENOENT
)
1601 if (!sctx
->parent_root
) {
1602 right_ret
= -ENOENT
;
1604 ret
= get_inode_info(sctx
->parent_root
, ino
, NULL
, &right_gen
,
1605 NULL
, NULL
, NULL
, NULL
);
1606 if (ret
< 0 && ret
!= -ENOENT
)
1611 if (!left_ret
&& !right_ret
) {
1612 if (left_gen
== gen
&& right_gen
== gen
) {
1613 ret
= inode_state_no_change
;
1614 } else if (left_gen
== gen
) {
1615 if (ino
< sctx
->send_progress
)
1616 ret
= inode_state_did_create
;
1618 ret
= inode_state_will_create
;
1619 } else if (right_gen
== gen
) {
1620 if (ino
< sctx
->send_progress
)
1621 ret
= inode_state_did_delete
;
1623 ret
= inode_state_will_delete
;
1627 } else if (!left_ret
) {
1628 if (left_gen
== gen
) {
1629 if (ino
< sctx
->send_progress
)
1630 ret
= inode_state_did_create
;
1632 ret
= inode_state_will_create
;
1636 } else if (!right_ret
) {
1637 if (right_gen
== gen
) {
1638 if (ino
< sctx
->send_progress
)
1639 ret
= inode_state_did_delete
;
1641 ret
= inode_state_will_delete
;
1653 static int is_inode_existent(struct send_ctx
*sctx
, u64 ino
, u64 gen
)
1657 if (ino
== BTRFS_FIRST_FREE_OBJECTID
)
1660 ret
= get_cur_inode_state(sctx
, ino
, gen
);
1664 if (ret
== inode_state_no_change
||
1665 ret
== inode_state_did_create
||
1666 ret
== inode_state_will_delete
)
1676 * Helper function to lookup a dir item in a dir.
1678 static int lookup_dir_item_inode(struct btrfs_root
*root
,
1679 u64 dir
, const char *name
, int name_len
,
1684 struct btrfs_dir_item
*di
;
1685 struct btrfs_key key
;
1686 struct btrfs_path
*path
;
1688 path
= alloc_path_for_send();
1692 di
= btrfs_lookup_dir_item(NULL
, root
, path
,
1693 dir
, name
, name_len
, 0);
1702 btrfs_dir_item_key_to_cpu(path
->nodes
[0], di
, &key
);
1703 if (key
.type
== BTRFS_ROOT_ITEM_KEY
) {
1707 *found_inode
= key
.objectid
;
1708 *found_type
= btrfs_dir_type(path
->nodes
[0], di
);
1711 btrfs_free_path(path
);
1716 * Looks up the first btrfs_inode_ref of a given ino. It returns the parent dir,
1717 * generation of the parent dir and the name of the dir entry.
1719 static int get_first_ref(struct btrfs_root
*root
, u64 ino
,
1720 u64
*dir
, u64
*dir_gen
, struct fs_path
*name
)
1723 struct btrfs_key key
;
1724 struct btrfs_key found_key
;
1725 struct btrfs_path
*path
;
1729 path
= alloc_path_for_send();
1734 key
.type
= BTRFS_INODE_REF_KEY
;
1737 ret
= btrfs_search_slot_for_read(root
, &key
, path
, 1, 0);
1741 btrfs_item_key_to_cpu(path
->nodes
[0], &found_key
,
1743 if (ret
|| found_key
.objectid
!= ino
||
1744 (found_key
.type
!= BTRFS_INODE_REF_KEY
&&
1745 found_key
.type
!= BTRFS_INODE_EXTREF_KEY
)) {
1750 if (found_key
.type
== BTRFS_INODE_REF_KEY
) {
1751 struct btrfs_inode_ref
*iref
;
1752 iref
= btrfs_item_ptr(path
->nodes
[0], path
->slots
[0],
1753 struct btrfs_inode_ref
);
1754 len
= btrfs_inode_ref_name_len(path
->nodes
[0], iref
);
1755 ret
= fs_path_add_from_extent_buffer(name
, path
->nodes
[0],
1756 (unsigned long)(iref
+ 1),
1758 parent_dir
= found_key
.offset
;
1760 struct btrfs_inode_extref
*extref
;
1761 extref
= btrfs_item_ptr(path
->nodes
[0], path
->slots
[0],
1762 struct btrfs_inode_extref
);
1763 len
= btrfs_inode_extref_name_len(path
->nodes
[0], extref
);
1764 ret
= fs_path_add_from_extent_buffer(name
, path
->nodes
[0],
1765 (unsigned long)&extref
->name
, len
);
1766 parent_dir
= btrfs_inode_extref_parent(path
->nodes
[0], extref
);
1770 btrfs_release_path(path
);
1773 ret
= get_inode_info(root
, parent_dir
, NULL
, dir_gen
, NULL
,
1782 btrfs_free_path(path
);
1786 static int is_first_ref(struct btrfs_root
*root
,
1788 const char *name
, int name_len
)
1791 struct fs_path
*tmp_name
;
1794 tmp_name
= fs_path_alloc();
1798 ret
= get_first_ref(root
, ino
, &tmp_dir
, NULL
, tmp_name
);
1802 if (dir
!= tmp_dir
|| name_len
!= fs_path_len(tmp_name
)) {
1807 ret
= !memcmp(tmp_name
->start
, name
, name_len
);
1810 fs_path_free(tmp_name
);
1815 * Used by process_recorded_refs to determine if a new ref would overwrite an
1816 * already existing ref. In case it detects an overwrite, it returns the
1817 * inode/gen in who_ino/who_gen.
1818 * When an overwrite is detected, process_recorded_refs does proper orphanizing
1819 * to make sure later references to the overwritten inode are possible.
1820 * Orphanizing is however only required for the first ref of an inode.
1821 * process_recorded_refs does an additional is_first_ref check to see if
1822 * orphanizing is really required.
1824 static int will_overwrite_ref(struct send_ctx
*sctx
, u64 dir
, u64 dir_gen
,
1825 const char *name
, int name_len
,
1826 u64
*who_ino
, u64
*who_gen
, u64
*who_mode
)
1830 u64 other_inode
= 0;
1833 if (!sctx
->parent_root
)
1836 ret
= is_inode_existent(sctx
, dir
, dir_gen
);
1841 * If we have a parent root we need to verify that the parent dir was
1842 * not deleted and then re-created, if it was then we have no overwrite
1843 * and we can just unlink this entry.
1845 if (sctx
->parent_root
&& dir
!= BTRFS_FIRST_FREE_OBJECTID
) {
1846 ret
= get_inode_info(sctx
->parent_root
, dir
, NULL
, &gen
, NULL
,
1848 if (ret
< 0 && ret
!= -ENOENT
)
1858 ret
= lookup_dir_item_inode(sctx
->parent_root
, dir
, name
, name_len
,
1859 &other_inode
, &other_type
);
1860 if (ret
< 0 && ret
!= -ENOENT
)
1868 * Check if the overwritten ref was already processed. If yes, the ref
1869 * was already unlinked/moved, so we can safely assume that we will not
1870 * overwrite anything at this point in time.
1872 if (other_inode
> sctx
->send_progress
||
1873 is_waiting_for_move(sctx
, other_inode
)) {
1874 ret
= get_inode_info(sctx
->parent_root
, other_inode
, NULL
,
1875 who_gen
, who_mode
, NULL
, NULL
, NULL
);
1880 *who_ino
= other_inode
;
1890 * Checks if the ref was overwritten by an already processed inode. This is
1891 * used by __get_cur_name_and_parent to find out if the ref was orphanized and
1892 * thus the orphan name needs be used.
1893 * process_recorded_refs also uses it to avoid unlinking of refs that were
1896 static int did_overwrite_ref(struct send_ctx
*sctx
,
1897 u64 dir
, u64 dir_gen
,
1898 u64 ino
, u64 ino_gen
,
1899 const char *name
, int name_len
)
1906 if (!sctx
->parent_root
)
1909 ret
= is_inode_existent(sctx
, dir
, dir_gen
);
1913 if (dir
!= BTRFS_FIRST_FREE_OBJECTID
) {
1914 ret
= get_inode_info(sctx
->send_root
, dir
, NULL
, &gen
, NULL
,
1916 if (ret
< 0 && ret
!= -ENOENT
)
1926 /* check if the ref was overwritten by another ref */
1927 ret
= lookup_dir_item_inode(sctx
->send_root
, dir
, name
, name_len
,
1928 &ow_inode
, &other_type
);
1929 if (ret
< 0 && ret
!= -ENOENT
)
1932 /* was never and will never be overwritten */
1937 ret
= get_inode_info(sctx
->send_root
, ow_inode
, NULL
, &gen
, NULL
, NULL
,
1942 if (ow_inode
== ino
&& gen
== ino_gen
) {
1948 * We know that it is or will be overwritten. Check this now.
1949 * The current inode being processed might have been the one that caused
1950 * inode 'ino' to be orphanized, therefore check if ow_inode matches
1951 * the current inode being processed.
1953 if ((ow_inode
< sctx
->send_progress
) ||
1954 (ino
!= sctx
->cur_ino
&& ow_inode
== sctx
->cur_ino
&&
1955 gen
== sctx
->cur_inode_gen
))
1965 * Same as did_overwrite_ref, but also checks if it is the first ref of an inode
1966 * that got overwritten. This is used by process_recorded_refs to determine
1967 * if it has to use the path as returned by get_cur_path or the orphan name.
1969 static int did_overwrite_first_ref(struct send_ctx
*sctx
, u64 ino
, u64 gen
)
1972 struct fs_path
*name
= NULL
;
1976 if (!sctx
->parent_root
)
1979 name
= fs_path_alloc();
1983 ret
= get_first_ref(sctx
->parent_root
, ino
, &dir
, &dir_gen
, name
);
1987 ret
= did_overwrite_ref(sctx
, dir
, dir_gen
, ino
, gen
,
1988 name
->start
, fs_path_len(name
));
/*
 * Insert a name cache entry. On 32bit kernels the radix tree index is 32bit,
 * so we need to do some special handling in case we have clashes. This function
 * takes care of this with the help of name_cache_entry::radix_list.
 * In case of error, nce is kfreed.
 */
static int name_cache_insert(struct send_ctx *sctx,
			     struct name_cache_entry *nce)
{
	int ret = 0;
	struct list_head *nce_head;

	nce_head = radix_tree_lookup(&sctx->name_cache,
			(unsigned long)nce->ino);
	if (!nce_head) {
		nce_head = kmalloc(sizeof(*nce_head), GFP_KERNEL);
		if (!nce_head) {
			kfree(nce);
			return -ENOMEM;
		}
		INIT_LIST_HEAD(nce_head);

		ret = radix_tree_insert(&sctx->name_cache, nce->ino, nce_head);
		if (ret < 0) {
			kfree(nce_head);
			kfree(nce);
			return ret;
		}
	}
	list_add_tail(&nce->radix_list, nce_head);
	list_add_tail(&nce->list, &sctx->name_cache_list);
	sctx->name_cache_size++;

	return ret;
}
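
/*
 * Illustrative note (not part of the original code): the radix tree key used
 * by name_cache_insert()/name_cache_search() is an unsigned long, so on 32bit
 * kernels two inodes whose numbers only differ in the upper 32 bits map to
 * the same nce_head list, e.g.
 *
 *	ino A = 0x00000001_00000100
 *	ino B = 0x00000002_00000100
 *
 * Both land in the slot for key 0x00000100; name_cache_search() then walks
 * radix_list and compares the full 64bit ino (and gen) to pick the right
 * entry.
 */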
2031 static void name_cache_delete(struct send_ctx
*sctx
,
2032 struct name_cache_entry
*nce
)
2034 struct list_head
*nce_head
;
2036 nce_head
= radix_tree_lookup(&sctx
->name_cache
,
2037 (unsigned long)nce
->ino
);
2039 btrfs_err(sctx
->send_root
->fs_info
,
2040 "name_cache_delete lookup failed ino %llu cache size %d, leaking memory",
2041 nce
->ino
, sctx
->name_cache_size
);
2044 list_del(&nce
->radix_list
);
2045 list_del(&nce
->list
);
2046 sctx
->name_cache_size
--;
2049 * We may not get to the final release of nce_head if the lookup fails
2051 if (nce_head
&& list_empty(nce_head
)) {
2052 radix_tree_delete(&sctx
->name_cache
, (unsigned long)nce
->ino
);
2057 static struct name_cache_entry
*name_cache_search(struct send_ctx
*sctx
,
2060 struct list_head
*nce_head
;
2061 struct name_cache_entry
*cur
;
2063 nce_head
= radix_tree_lookup(&sctx
->name_cache
, (unsigned long)ino
);
2067 list_for_each_entry(cur
, nce_head
, radix_list
) {
2068 if (cur
->ino
== ino
&& cur
->gen
== gen
)
2075 * Removes the entry from the list and adds it back to the end. This marks the
2076 * entry as recently used so that name_cache_clean_unused does not remove it.
2078 static void name_cache_used(struct send_ctx
*sctx
, struct name_cache_entry
*nce
)
2080 list_del(&nce
->list
);
2081 list_add_tail(&nce
->list
, &sctx
->name_cache_list
);
2085 * Remove some entries from the beginning of name_cache_list.
2087 static void name_cache_clean_unused(struct send_ctx
*sctx
)
2089 struct name_cache_entry
*nce
;
2091 if (sctx
->name_cache_size
< SEND_CTX_NAME_CACHE_CLEAN_SIZE
)
2094 while (sctx
->name_cache_size
> SEND_CTX_MAX_NAME_CACHE_SIZE
) {
2095 nce
= list_entry(sctx
->name_cache_list
.next
,
2096 struct name_cache_entry
, list
);
2097 name_cache_delete(sctx
, nce
);
2102 static void name_cache_free(struct send_ctx
*sctx
)
2104 struct name_cache_entry
*nce
;
2106 while (!list_empty(&sctx
->name_cache_list
)) {
2107 nce
= list_entry(sctx
->name_cache_list
.next
,
2108 struct name_cache_entry
, list
);
2109 name_cache_delete(sctx
, nce
);
2115 * Used by get_cur_path for each ref up to the root.
2116 * Returns 0 if it succeeded.
2117 * Returns 1 if the inode is not existent or got overwritten. In that case, the
2118 * name is an orphan name. This instructs get_cur_path to stop iterating. If 1
2119 * is returned, parent_ino/parent_gen are not guaranteed to be valid.
2120 * Returns <0 in case of error.
2122 static int __get_cur_name_and_parent(struct send_ctx
*sctx
,
2126 struct fs_path
*dest
)
2130 struct name_cache_entry
*nce
= NULL
;
2133 * First check if we already did a call to this function with the same
2134 * ino/gen. If yes, check if the cache entry is still up-to-date. If yes
2135 * return the cached result.
2137 nce
= name_cache_search(sctx
, ino
, gen
);
2139 if (ino
< sctx
->send_progress
&& nce
->need_later_update
) {
2140 name_cache_delete(sctx
, nce
);
2144 name_cache_used(sctx
, nce
);
2145 *parent_ino
= nce
->parent_ino
;
2146 *parent_gen
= nce
->parent_gen
;
2147 ret
= fs_path_add(dest
, nce
->name
, nce
->name_len
);
2156 * If the inode is not existent yet, add the orphan name and return 1.
2157 * This should only happen for the parent dir that we determine in
2160 ret
= is_inode_existent(sctx
, ino
, gen
);
2165 ret
= gen_unique_name(sctx
, ino
, gen
, dest
);
2173 * Depending on whether the inode was already processed or not, use
2174 * send_root or parent_root for ref lookup.
2176 if (ino
< sctx
->send_progress
)
2177 ret
= get_first_ref(sctx
->send_root
, ino
,
2178 parent_ino
, parent_gen
, dest
);
2180 ret
= get_first_ref(sctx
->parent_root
, ino
,
2181 parent_ino
, parent_gen
, dest
);
2186 * Check if the ref was overwritten by an inode's ref that was processed
2187 * earlier. If yes, treat as orphan and return 1.
2189 ret
= did_overwrite_ref(sctx
, *parent_ino
, *parent_gen
, ino
, gen
,
2190 dest
->start
, dest
->end
- dest
->start
);
2194 fs_path_reset(dest
);
2195 ret
= gen_unique_name(sctx
, ino
, gen
, dest
);
2203 * Store the result of the lookup in the name cache.
2205 nce
= kmalloc(sizeof(*nce
) + fs_path_len(dest
) + 1, GFP_KERNEL
);
2213 nce
->parent_ino
= *parent_ino
;
2214 nce
->parent_gen
= *parent_gen
;
2215 nce
->name_len
= fs_path_len(dest
);
2217 strcpy(nce
->name
, dest
->start
);
2219 if (ino
< sctx
->send_progress
)
2220 nce
->need_later_update
= 0;
2222 nce
->need_later_update
= 1;
2224 nce_ret
= name_cache_insert(sctx
, nce
);
2227 name_cache_clean_unused(sctx
);
2234 * Magic happens here. This function returns the first ref to an inode as it
2235 * would look like while receiving the stream at this point in time.
2236 * We walk the path up to the root. For every inode in between, we check if it
2237 * was already processed/sent. If yes, we continue with the parent as found
2238 * in send_root. If not, we continue with the parent as found in parent_root.
2239 * If we encounter an inode that was deleted at this point in time, we use the
2240 * inodes "orphan" name instead of the real name and stop. Same with new inodes
2241 * that were not created yet and overwritten inodes/refs.
 * When do we have orphan inodes:
2244 * 1. When an inode is freshly created and thus no valid refs are available yet
 * 2. When a directory lost all its refs (deleted) but still has dir items
2246 * inside which were not processed yet (pending for move/delete). If anyone
2247 * tried to get the path to the dir items, it would get a path inside that
2249 * 3. When an inode is moved around or gets new links, it may overwrite the ref
2250 * of an unprocessed inode. If in that case the first ref would be
2251 * overwritten, the overwritten inode gets "orphanized". Later when we
2252 * process this overwritten inode, it is restored at a new place by moving
2255 * sctx->send_progress tells this function at which point in time receiving
2258 static int get_cur_path(struct send_ctx
*sctx
, u64 ino
, u64 gen
,
2259 struct fs_path
*dest
)
2262 struct fs_path
*name
= NULL
;
2263 u64 parent_inode
= 0;
2267 name
= fs_path_alloc();
2274 fs_path_reset(dest
);
2276 while (!stop
&& ino
!= BTRFS_FIRST_FREE_OBJECTID
) {
2277 struct waiting_dir_move
*wdm
;
2279 fs_path_reset(name
);
2281 if (is_waiting_for_rm(sctx
, ino
)) {
2282 ret
= gen_unique_name(sctx
, ino
, gen
, name
);
2285 ret
= fs_path_add_path(dest
, name
);
2289 wdm
= get_waiting_dir_move(sctx
, ino
);
2290 if (wdm
&& wdm
->orphanized
) {
2291 ret
= gen_unique_name(sctx
, ino
, gen
, name
);
2294 ret
= get_first_ref(sctx
->parent_root
, ino
,
2295 &parent_inode
, &parent_gen
, name
);
2297 ret
= __get_cur_name_and_parent(sctx
, ino
, gen
,
2307 ret
= fs_path_add_path(dest
, name
);
2318 fs_path_unreverse(dest
);
2323 * Sends a BTRFS_SEND_C_SUBVOL command/item to userspace
2325 static int send_subvol_begin(struct send_ctx
*sctx
)
2328 struct btrfs_root
*send_root
= sctx
->send_root
;
2329 struct btrfs_root
*parent_root
= sctx
->parent_root
;
2330 struct btrfs_path
*path
;
2331 struct btrfs_key key
;
2332 struct btrfs_root_ref
*ref
;
2333 struct extent_buffer
*leaf
;
2337 path
= btrfs_alloc_path();
2341 name
= kmalloc(BTRFS_PATH_NAME_MAX
, GFP_KERNEL
);
2343 btrfs_free_path(path
);
2347 key
.objectid
= send_root
->objectid
;
2348 key
.type
= BTRFS_ROOT_BACKREF_KEY
;
2351 ret
= btrfs_search_slot_for_read(send_root
->fs_info
->tree_root
,
2360 leaf
= path
->nodes
[0];
2361 btrfs_item_key_to_cpu(leaf
, &key
, path
->slots
[0]);
2362 if (key
.type
!= BTRFS_ROOT_BACKREF_KEY
||
2363 key
.objectid
!= send_root
->objectid
) {
2367 ref
= btrfs_item_ptr(leaf
, path
->slots
[0], struct btrfs_root_ref
);
2368 namelen
= btrfs_root_ref_name_len(leaf
, ref
);
2369 read_extent_buffer(leaf
, name
, (unsigned long)(ref
+ 1), namelen
);
2370 btrfs_release_path(path
);
2373 ret
= begin_cmd(sctx
, BTRFS_SEND_C_SNAPSHOT
);
2377 ret
= begin_cmd(sctx
, BTRFS_SEND_C_SUBVOL
);
2382 TLV_PUT_STRING(sctx
, BTRFS_SEND_A_PATH
, name
, namelen
);
2384 if (!btrfs_is_empty_uuid(sctx
->send_root
->root_item
.received_uuid
))
2385 TLV_PUT_UUID(sctx
, BTRFS_SEND_A_UUID
,
2386 sctx
->send_root
->root_item
.received_uuid
);
2388 TLV_PUT_UUID(sctx
, BTRFS_SEND_A_UUID
,
2389 sctx
->send_root
->root_item
.uuid
);
2391 TLV_PUT_U64(sctx
, BTRFS_SEND_A_CTRANSID
,
2392 le64_to_cpu(sctx
->send_root
->root_item
.ctransid
));
2394 if (!btrfs_is_empty_uuid(parent_root
->root_item
.received_uuid
))
2395 TLV_PUT_UUID(sctx
, BTRFS_SEND_A_CLONE_UUID
,
2396 parent_root
->root_item
.received_uuid
);
2398 TLV_PUT_UUID(sctx
, BTRFS_SEND_A_CLONE_UUID
,
2399 parent_root
->root_item
.uuid
);
2400 TLV_PUT_U64(sctx
, BTRFS_SEND_A_CLONE_CTRANSID
,
2401 le64_to_cpu(sctx
->parent_root
->root_item
.ctransid
));
2404 ret
= send_cmd(sctx
);
2408 btrfs_free_path(path
);
2413 static int send_truncate(struct send_ctx
*sctx
, u64 ino
, u64 gen
, u64 size
)
2415 struct btrfs_fs_info
*fs_info
= sctx
->send_root
->fs_info
;
2419 btrfs_debug(fs_info
, "send_truncate %llu size=%llu", ino
, size
);
2421 p
= fs_path_alloc();
2425 ret
= begin_cmd(sctx
, BTRFS_SEND_C_TRUNCATE
);
2429 ret
= get_cur_path(sctx
, ino
, gen
, p
);
2432 TLV_PUT_PATH(sctx
, BTRFS_SEND_A_PATH
, p
);
2433 TLV_PUT_U64(sctx
, BTRFS_SEND_A_SIZE
, size
);
2435 ret
= send_cmd(sctx
);
2443 static int send_chmod(struct send_ctx
*sctx
, u64 ino
, u64 gen
, u64 mode
)
2445 struct btrfs_fs_info
*fs_info
= sctx
->send_root
->fs_info
;
2449 btrfs_debug(fs_info
, "send_chmod %llu mode=%llu", ino
, mode
);
2451 p
= fs_path_alloc();
2455 ret
= begin_cmd(sctx
, BTRFS_SEND_C_CHMOD
);
2459 ret
= get_cur_path(sctx
, ino
, gen
, p
);
2462 TLV_PUT_PATH(sctx
, BTRFS_SEND_A_PATH
, p
);
2463 TLV_PUT_U64(sctx
, BTRFS_SEND_A_MODE
, mode
& 07777);
2465 ret
= send_cmd(sctx
);
2473 static int send_chown(struct send_ctx
*sctx
, u64 ino
, u64 gen
, u64 uid
, u64 gid
)
2475 struct btrfs_fs_info
*fs_info
= sctx
->send_root
->fs_info
;
2479 btrfs_debug(fs_info
, "send_chown %llu uid=%llu, gid=%llu",
2482 p
= fs_path_alloc();
2486 ret
= begin_cmd(sctx
, BTRFS_SEND_C_CHOWN
);
2490 ret
= get_cur_path(sctx
, ino
, gen
, p
);
2493 TLV_PUT_PATH(sctx
, BTRFS_SEND_A_PATH
, p
);
2494 TLV_PUT_U64(sctx
, BTRFS_SEND_A_UID
, uid
);
2495 TLV_PUT_U64(sctx
, BTRFS_SEND_A_GID
, gid
);
2497 ret
= send_cmd(sctx
);
2505 static int send_utimes(struct send_ctx
*sctx
, u64 ino
, u64 gen
)
2507 struct btrfs_fs_info
*fs_info
= sctx
->send_root
->fs_info
;
2509 struct fs_path
*p
= NULL
;
2510 struct btrfs_inode_item
*ii
;
2511 struct btrfs_path
*path
= NULL
;
2512 struct extent_buffer
*eb
;
2513 struct btrfs_key key
;
2516 btrfs_debug(fs_info
, "send_utimes %llu", ino
);
2518 p
= fs_path_alloc();
2522 path
= alloc_path_for_send();
2529 key
.type
= BTRFS_INODE_ITEM_KEY
;
2531 ret
= btrfs_search_slot(NULL
, sctx
->send_root
, &key
, path
, 0, 0);
2537 eb
= path
->nodes
[0];
2538 slot
= path
->slots
[0];
2539 ii
= btrfs_item_ptr(eb
, slot
, struct btrfs_inode_item
);
2541 ret
= begin_cmd(sctx
, BTRFS_SEND_C_UTIMES
);
2545 ret
= get_cur_path(sctx
, ino
, gen
, p
);
2548 TLV_PUT_PATH(sctx
, BTRFS_SEND_A_PATH
, p
);
2549 TLV_PUT_BTRFS_TIMESPEC(sctx
, BTRFS_SEND_A_ATIME
, eb
, &ii
->atime
);
2550 TLV_PUT_BTRFS_TIMESPEC(sctx
, BTRFS_SEND_A_MTIME
, eb
, &ii
->mtime
);
2551 TLV_PUT_BTRFS_TIMESPEC(sctx
, BTRFS_SEND_A_CTIME
, eb
, &ii
->ctime
);
2552 /* TODO Add otime support when the otime patches get into upstream */
2554 ret
= send_cmd(sctx
);
2559 btrfs_free_path(path
);
2564 * Sends a BTRFS_SEND_C_MKXXX or SYMLINK command to user space. We don't have
2565 * a valid path yet because we did not process the refs yet. So, the inode
2566 * is created as orphan.
2568 static int send_create_inode(struct send_ctx
*sctx
, u64 ino
)
2570 struct btrfs_fs_info
*fs_info
= sctx
->send_root
->fs_info
;
2578 btrfs_debug(fs_info
, "send_create_inode %llu", ino
);
2580 p
= fs_path_alloc();
2584 if (ino
!= sctx
->cur_ino
) {
2585 ret
= get_inode_info(sctx
->send_root
, ino
, NULL
, &gen
, &mode
,
2590 gen
= sctx
->cur_inode_gen
;
2591 mode
= sctx
->cur_inode_mode
;
2592 rdev
= sctx
->cur_inode_rdev
;
2595 if (S_ISREG(mode
)) {
2596 cmd
= BTRFS_SEND_C_MKFILE
;
2597 } else if (S_ISDIR(mode
)) {
2598 cmd
= BTRFS_SEND_C_MKDIR
;
2599 } else if (S_ISLNK(mode
)) {
2600 cmd
= BTRFS_SEND_C_SYMLINK
;
2601 } else if (S_ISCHR(mode
) || S_ISBLK(mode
)) {
2602 cmd
= BTRFS_SEND_C_MKNOD
;
2603 } else if (S_ISFIFO(mode
)) {
2604 cmd
= BTRFS_SEND_C_MKFIFO
;
2605 } else if (S_ISSOCK(mode
)) {
2606 cmd
= BTRFS_SEND_C_MKSOCK
;
2608 btrfs_warn(sctx
->send_root
->fs_info
, "unexpected inode type %o",
2609 (int)(mode
& S_IFMT
));
2614 ret
= begin_cmd(sctx
, cmd
);
2618 ret
= gen_unique_name(sctx
, ino
, gen
, p
);
2622 TLV_PUT_PATH(sctx
, BTRFS_SEND_A_PATH
, p
);
2623 TLV_PUT_U64(sctx
, BTRFS_SEND_A_INO
, ino
);
2625 if (S_ISLNK(mode
)) {
2627 ret
= read_symlink(sctx
->send_root
, ino
, p
);
2630 TLV_PUT_PATH(sctx
, BTRFS_SEND_A_PATH_LINK
, p
);
2631 } else if (S_ISCHR(mode
) || S_ISBLK(mode
) ||
2632 S_ISFIFO(mode
) || S_ISSOCK(mode
)) {
2633 TLV_PUT_U64(sctx
, BTRFS_SEND_A_RDEV
, new_encode_dev(rdev
));
2634 TLV_PUT_U64(sctx
, BTRFS_SEND_A_MODE
, mode
);
2637 ret
= send_cmd(sctx
);
2649 * We need some special handling for inodes that get processed before the parent
2650 * directory got created. See process_recorded_refs for details.
2651 * This function does the check if we already created the dir out of order.
2653 static int did_create_dir(struct send_ctx
*sctx
, u64 dir
)
2656 struct btrfs_path
*path
= NULL
;
2657 struct btrfs_key key
;
2658 struct btrfs_key found_key
;
2659 struct btrfs_key di_key
;
2660 struct extent_buffer
*eb
;
2661 struct btrfs_dir_item
*di
;
2664 path
= alloc_path_for_send();
2671 key
.type
= BTRFS_DIR_INDEX_KEY
;
2673 ret
= btrfs_search_slot(NULL
, sctx
->send_root
, &key
, path
, 0, 0);
2678 eb
= path
->nodes
[0];
2679 slot
= path
->slots
[0];
2680 if (slot
>= btrfs_header_nritems(eb
)) {
2681 ret
= btrfs_next_leaf(sctx
->send_root
, path
);
2684 } else if (ret
> 0) {
2691 btrfs_item_key_to_cpu(eb
, &found_key
, slot
);
2692 if (found_key
.objectid
!= key
.objectid
||
2693 found_key
.type
!= key
.type
) {
2698 di
= btrfs_item_ptr(eb
, slot
, struct btrfs_dir_item
);
2699 btrfs_dir_item_key_to_cpu(eb
, di
, &di_key
);
2701 if (di_key
.type
!= BTRFS_ROOT_ITEM_KEY
&&
2702 di_key
.objectid
< sctx
->send_progress
) {
2711 btrfs_free_path(path
);
2716 * Only creates the inode if it is:
2717 * 1. Not a directory
2718 * 2. Or a directory which was not created already due to out of order
2719 * directories. See did_create_dir and process_recorded_refs for details.
2721 static int send_create_inode_if_needed(struct send_ctx
*sctx
)
2725 if (S_ISDIR(sctx
->cur_inode_mode
)) {
2726 ret
= did_create_dir(sctx
, sctx
->cur_ino
);
2735 ret
= send_create_inode(sctx
, sctx
->cur_ino
);
2743 struct recorded_ref
{
2744 struct list_head list
;
2746 struct fs_path
*full_path
;
2752 static void set_ref_path(struct recorded_ref
*ref
, struct fs_path
*path
)
2754 ref
->full_path
= path
;
2755 ref
->name
= (char *)kbasename(ref
->full_path
->start
);
2756 ref
->name_len
= ref
->full_path
->end
- ref
->name
;
2760 * We need to process new refs before deleted refs, but compare_tree gives us
2761 * everything mixed. So we first record all refs and later process them.
2762 * This function is a helper to record one ref.
2764 static int __record_ref(struct list_head
*head
, u64 dir
,
2765 u64 dir_gen
, struct fs_path
*path
)
2767 struct recorded_ref
*ref
;
2769 ref
= kmalloc(sizeof(*ref
), GFP_KERNEL
);
2774 ref
->dir_gen
= dir_gen
;
2775 set_ref_path(ref
, path
);
2776 list_add_tail(&ref
->list
, head
);
2780 static int dup_ref(struct recorded_ref
*ref
, struct list_head
*list
)
2782 struct recorded_ref
*new;
2784 new = kmalloc(sizeof(*ref
), GFP_KERNEL
);
2788 new->dir
= ref
->dir
;
2789 new->dir_gen
= ref
->dir_gen
;
2790 new->full_path
= NULL
;
2791 INIT_LIST_HEAD(&new->list
);
2792 list_add_tail(&new->list
, list
);
2796 static void __free_recorded_refs(struct list_head
*head
)
2798 struct recorded_ref
*cur
;
2800 while (!list_empty(head
)) {
2801 cur
= list_entry(head
->next
, struct recorded_ref
, list
);
2802 fs_path_free(cur
->full_path
);
2803 list_del(&cur
->list
);
2808 static void free_recorded_refs(struct send_ctx
*sctx
)
2810 __free_recorded_refs(&sctx
->new_refs
);
2811 __free_recorded_refs(&sctx
->deleted_refs
);
2815 * Renames/moves a file/dir to its orphan name. Used when the first
2816 * ref of an unprocessed inode gets overwritten and for all non empty
2819 static int orphanize_inode(struct send_ctx
*sctx
, u64 ino
, u64 gen
,
2820 struct fs_path
*path
)
2823 struct fs_path
*orphan
;
2825 orphan
= fs_path_alloc();
2829 ret
= gen_unique_name(sctx
, ino
, gen
, orphan
);
2833 ret
= send_rename(sctx
, path
, orphan
);
2836 fs_path_free(orphan
);
static struct orphan_dir_info *
add_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)

	struct rb_node **p = &sctx->orphan_dirs.rb_node;
	struct rb_node *parent = NULL;
	struct orphan_dir_info *entry, *odi;

	odi = kmalloc(sizeof(*odi), GFP_KERNEL);
		return ERR_PTR(-ENOMEM);

		entry = rb_entry(parent, struct orphan_dir_info, node);
		if (dir_ino < entry->ino) {
		} else if (dir_ino > entry->ino) {
			p = &(*p)->rb_right;

	rb_link_node(&odi->node, parent, p);
	rb_insert_color(&odi->node, &sctx->orphan_dirs);

static struct orphan_dir_info *
get_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)

	struct rb_node *n = sctx->orphan_dirs.rb_node;
	struct orphan_dir_info *entry;

		entry = rb_entry(n, struct orphan_dir_info, node);
		if (dir_ino < entry->ino)
		else if (dir_ino > entry->ino)

static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino)

	struct orphan_dir_info *odi = get_orphan_dir_info(sctx, dir_ino);

static void free_orphan_dir_info(struct send_ctx *sctx,
				 struct orphan_dir_info *odi)

	rb_erase(&odi->node, &sctx->orphan_dirs);
/*
 * Returns 1 if a directory can be removed at this point in time.
 * We check this by iterating all dir items and checking if the inode behind
 * the dir item was already processed.
 */
static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen,
		     u64 send_progress)

	struct btrfs_root *root = sctx->parent_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_key loc;
	struct btrfs_dir_item *di;

	/*
	 * Don't try to rmdir the top/root subvolume dir.
	 */
	if (dir == BTRFS_FIRST_FREE_OBJECTID)

	path = alloc_path_for_send();

	key.type = BTRFS_DIR_INDEX_KEY;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);

		struct waiting_dir_move *dm;

		if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
			ret = btrfs_next_leaf(root, path);

		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
		if (found_key.objectid != key.objectid ||
		    found_key.type != key.type)

		di = btrfs_item_ptr(path->nodes[0], path->slots[0],
				    struct btrfs_dir_item);
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &loc);

		dm = get_waiting_dir_move(sctx, loc.objectid);
			struct orphan_dir_info *odi;

			odi = add_orphan_dir_info(sctx, dir);
			dm->rmdir_ino = dir;

		if (loc.objectid > send_progress) {
			struct orphan_dir_info *odi;

			odi = get_orphan_dir_info(sctx, dir);
			free_orphan_dir_info(sctx, odi);

	btrfs_free_path(path);
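/*
 * Example (illustrative): suppose directory 260 was deleted in the send
 * snapshot but the parent snapshot still has a dir index item in it pointing
 * at inode 270, while send_progress is only at 265. can_rmdir() then returns
 * 0 and records the pending rmdir (via an orphan_dir_info or the rmdir_ino of
 * a waiting dir move), so the rmdir is retried once inode 270 has been
 * processed.
 */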
static int is_waiting_for_move(struct send_ctx *sctx, u64 ino)
{
	struct waiting_dir_move *entry = get_waiting_dir_move(sctx, ino);

	return entry != NULL;
}

static int add_waiting_dir_move(struct send_ctx *sctx, u64 ino, bool orphanized)

	struct rb_node **p = &sctx->waiting_dir_moves.rb_node;
	struct rb_node *parent = NULL;
	struct waiting_dir_move *entry, *dm;

	dm = kmalloc(sizeof(*dm), GFP_KERNEL);

	dm->orphanized = orphanized;

		entry = rb_entry(parent, struct waiting_dir_move, node);
		if (ino < entry->ino) {
		} else if (ino > entry->ino) {
			p = &(*p)->rb_right;

	rb_link_node(&dm->node, parent, p);
	rb_insert_color(&dm->node, &sctx->waiting_dir_moves);

static struct waiting_dir_move *
get_waiting_dir_move(struct send_ctx *sctx, u64 ino)

	struct rb_node *n = sctx->waiting_dir_moves.rb_node;
	struct waiting_dir_move *entry;

		entry = rb_entry(n, struct waiting_dir_move, node);
		if (ino < entry->ino)
		else if (ino > entry->ino)

static void free_waiting_dir_move(struct send_ctx *sctx,
				  struct waiting_dir_move *dm)

	rb_erase(&dm->node, &sctx->waiting_dir_moves);
static int add_pending_dir_move(struct send_ctx *sctx,
				struct list_head *new_refs,
				struct list_head *deleted_refs,
				const bool is_orphan)

	struct rb_node **p = &sctx->pending_dir_moves.rb_node;
	struct rb_node *parent = NULL;
	struct pending_dir_move *entry = NULL, *pm;
	struct recorded_ref *cur;

	pm = kmalloc(sizeof(*pm), GFP_KERNEL);

	pm->parent_ino = parent_ino;
	INIT_LIST_HEAD(&pm->list);
	INIT_LIST_HEAD(&pm->update_refs);
	RB_CLEAR_NODE(&pm->node);

		entry = rb_entry(parent, struct pending_dir_move, node);
		if (parent_ino < entry->parent_ino) {
		} else if (parent_ino > entry->parent_ino) {
			p = &(*p)->rb_right;

	list_for_each_entry(cur, deleted_refs, list) {
		ret = dup_ref(cur, &pm->update_refs);

	list_for_each_entry(cur, new_refs, list) {
		ret = dup_ref(cur, &pm->update_refs);

	ret = add_waiting_dir_move(sctx, pm->ino, is_orphan);

		list_add_tail(&pm->list, &entry->list);
		rb_link_node(&pm->node, parent, p);
		rb_insert_color(&pm->node, &sctx->pending_dir_moves);

	__free_recorded_refs(&pm->update_refs);

static struct pending_dir_move *get_pending_dir_moves(struct send_ctx *sctx,
						       u64 parent_ino)

	struct rb_node *n = sctx->pending_dir_moves.rb_node;
	struct pending_dir_move *entry;

		entry = rb_entry(n, struct pending_dir_move, node);
		if (parent_ino < entry->parent_ino)
		else if (parent_ino > entry->parent_ino)
static int path_loop(struct send_ctx *sctx, struct fs_path *name,
		     u64 ino, u64 gen, u64 *ancestor_ino)

	u64 parent_inode = 0;
	u64 start_ino = ino;

	while (ino != BTRFS_FIRST_FREE_OBJECTID) {
		fs_path_reset(name);

		if (is_waiting_for_rm(sctx, ino))

		if (is_waiting_for_move(sctx, ino)) {
			if (*ancestor_ino == 0)
				*ancestor_ino = ino;
			ret = get_first_ref(sctx->parent_root, ino,
					    &parent_inode, &parent_gen, name);
			ret = __get_cur_name_and_parent(sctx, ino, gen,

		if (parent_inode == start_ino) {
			if (*ancestor_ino == 0)
				*ancestor_ino = ino;
static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)

	struct fs_path *from_path = NULL;
	struct fs_path *to_path = NULL;
	struct fs_path *name = NULL;
	u64 orig_progress = sctx->send_progress;
	struct recorded_ref *cur;
	u64 parent_ino, parent_gen;
	struct waiting_dir_move *dm = NULL;

	name = fs_path_alloc();
	from_path = fs_path_alloc();
	if (!name || !from_path) {

	dm = get_waiting_dir_move(sctx, pm->ino);
	rmdir_ino = dm->rmdir_ino;
	is_orphan = dm->orphanized;
	free_waiting_dir_move(sctx, dm);

		ret = gen_unique_name(sctx, pm->ino, pm->gen, from_path);

		ret = get_first_ref(sctx->parent_root, pm->ino,
				    &parent_ino, &parent_gen, name);

		ret = get_cur_path(sctx, parent_ino, parent_gen,

		ret = fs_path_add_path(from_path, name);

	sctx->send_progress = sctx->cur_ino + 1;
	ret = path_loop(sctx, name, pm->ino, pm->gen, &ancestor);

		LIST_HEAD(deleted_refs);

		ASSERT(ancestor > BTRFS_FIRST_FREE_OBJECTID);
		ret = add_pending_dir_move(sctx, pm->ino, pm->gen, ancestor,
					   &pm->update_refs, &deleted_refs,

			dm = get_waiting_dir_move(sctx, pm->ino);
			dm->rmdir_ino = rmdir_ino;

	fs_path_reset(name);

	ret = get_cur_path(sctx, pm->ino, pm->gen, to_path);

	ret = send_rename(sctx, from_path, to_path);

		struct orphan_dir_info *odi;

		odi = get_orphan_dir_info(sctx, rmdir_ino);
			/* already deleted */

		ret = can_rmdir(sctx, rmdir_ino, odi->gen, sctx->cur_ino);

		name = fs_path_alloc();

		ret = get_cur_path(sctx, rmdir_ino, odi->gen, name);

		ret = send_rmdir(sctx, name);

		free_orphan_dir_info(sctx, odi);

	ret = send_utimes(sctx, pm->ino, pm->gen);

	/*
	 * After rename/move, need to update the utimes of both new parent(s)
	 * and old parent(s).
	 */
	list_for_each_entry(cur, &pm->update_refs, list) {
		/*
		 * The parent inode might have been deleted in the send snapshot
		 */
		ret = get_inode_info(sctx->send_root, cur->dir, NULL,
				     NULL, NULL, NULL, NULL, NULL);
		if (ret == -ENOENT) {

		ret = send_utimes(sctx, cur->dir, cur->dir_gen);

	fs_path_free(from_path);
	fs_path_free(to_path);
	sctx->send_progress = orig_progress;
static void free_pending_move(struct send_ctx *sctx, struct pending_dir_move *m)

	if (!list_empty(&m->list))
	if (!RB_EMPTY_NODE(&m->node))
		rb_erase(&m->node, &sctx->pending_dir_moves);
	__free_recorded_refs(&m->update_refs);

static void tail_append_pending_moves(struct pending_dir_move *moves,
				      struct list_head *stack)

	if (list_empty(&moves->list)) {
		list_add_tail(&moves->list, stack);

		list_splice_init(&moves->list, &list);
		list_add_tail(&moves->list, stack);
		list_splice_tail(&list, stack);

static int apply_children_dir_moves(struct send_ctx *sctx)

	struct pending_dir_move *pm;
	struct list_head stack;
	u64 parent_ino = sctx->cur_ino;

	pm = get_pending_dir_moves(sctx, parent_ino);

	INIT_LIST_HEAD(&stack);
	tail_append_pending_moves(pm, &stack);

	while (!list_empty(&stack)) {
		pm = list_first_entry(&stack, struct pending_dir_move, list);
		parent_ino = pm->ino;
		ret = apply_dir_move(sctx, pm);
		free_pending_move(sctx, pm);

		pm = get_pending_dir_moves(sctx, parent_ino);
			tail_append_pending_moves(pm, &stack);

	while (!list_empty(&stack)) {
		pm = list_first_entry(&stack, struct pending_dir_move, list);
		free_pending_move(sctx, pm);
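/*
 * Illustrative walk-through (not from the original source): if directories
 * 300 and 301 were both waiting for the inode just processed to be renamed,
 * they are pushed onto the local stack and applied in order; any moves that
 * were in turn waiting for 300 or 301 are appended to the stack as those are
 * applied, so the whole dependency chain drains iteratively, without
 * recursion.
 */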
/*
 * We might need to delay a directory rename even when no ancestor directory
 * (in the send root) with a higher inode number than ours (sctx->cur_ino) was
 * renamed. This happens when we rename a directory to the old name (the name
 * in the parent root) of some other unrelated directory that got its rename
 * delayed due to some ancestor with higher number that got renamed.
 *
 * |---- a/                  (ino 257)
 * |     |---- file          (ino 260)
 * |
 * |---- b/                  (ino 258)
 * |---- c/                  (ino 259)
 *
 * |---- a/                  (ino 258)
 * |---- x/                  (ino 259)
 *       |---- y/            (ino 257)
 *             |----- file   (ino 260)
 *
 * Here we can not rename 258 from 'b' to 'a' without the rename of inode 257
 * from 'a' to 'x/y' happening first, which in turn depends on the rename of
 * inode 259 from 'c' to 'x'. So the order of rename commands the send stream
 *
 * 1 - rename 259 from 'c' to 'x'
 * 2 - rename 257 from 'a' to 'x/y'
 * 3 - rename 258 from 'b' to 'a'
 *
 * Returns 1 if the rename of sctx->cur_ino needs to be delayed, 0 if it can
 * be done right away and < 0 on error.
 */
static int wait_for_dest_dir_move(struct send_ctx *sctx,
				  struct recorded_ref *parent_ref,
				  const bool is_orphan)

	struct btrfs_fs_info *fs_info = sctx->parent_root->fs_info;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key di_key;
	struct btrfs_dir_item *di;
	struct waiting_dir_move *wdm;

	if (RB_EMPTY_ROOT(&sctx->waiting_dir_moves))

	path = alloc_path_for_send();

	key.objectid = parent_ref->dir;
	key.type = BTRFS_DIR_ITEM_KEY;
	key.offset = btrfs_name_hash(parent_ref->name, parent_ref->name_len);

	ret = btrfs_search_slot(NULL, sctx->parent_root, &key, path, 0, 0);
	} else if (ret > 0) {

	di = btrfs_match_dir_item_name(fs_info, path, parent_ref->name,
				       parent_ref->name_len);

	/*
	 * di_key.objectid has the number of the inode that has a dentry in the
	 * parent directory with the same name that sctx->cur_ino is being
	 * renamed to. We need to check if that inode is in the send root as
	 * well and if it is currently marked as an inode with a pending rename,
	 * if it is, we need to delay the rename of sctx->cur_ino as well, so
	 * that it happens after that other inode is renamed.
	 */
	btrfs_dir_item_key_to_cpu(path->nodes[0], di, &di_key);
	if (di_key.type != BTRFS_INODE_ITEM_KEY) {

	ret = get_inode_info(sctx->parent_root, di_key.objectid, NULL,
			     &left_gen, NULL, NULL, NULL, NULL);

	ret = get_inode_info(sctx->send_root, di_key.objectid, NULL,
			     &right_gen, NULL, NULL, NULL, NULL);

	/* Different inode, no need to delay the rename of sctx->cur_ino */
	if (right_gen != left_gen) {

	wdm = get_waiting_dir_move(sctx, di_key.objectid);
	if (wdm && !wdm->orphanized) {
		ret = add_pending_dir_move(sctx,
					   sctx->cur_inode_gen,
					   &sctx->deleted_refs,

	btrfs_free_path(path);
/*
 * Check if inode ino2, or any of its ancestors, is inode ino1.
 * Return 1 if true, 0 if false and < 0 on error.
 */
static int check_ino_in_path(struct btrfs_root *root,
			     struct fs_path *fs_path)

		return ino1_gen == ino2_gen;

	while (ino > BTRFS_FIRST_FREE_OBJECTID) {

		fs_path_reset(fs_path);
		ret = get_first_ref(root, ino, &parent, &parent_gen, fs_path);
			return parent_gen == ino1_gen;
/*
 * Check if ino ino1 is an ancestor of inode ino2 in the given root for any
 * possible path (in case ino2 is not a directory and has multiple hard links).
 * Return 1 if true, 0 if false and < 0 on error.
 */
static int is_ancestor(struct btrfs_root *root,
		       struct fs_path *fs_path)

	bool free_fs_path = false;
	struct btrfs_path *path = NULL;
	struct btrfs_key key;

		fs_path = fs_path_alloc();
		free_fs_path = true;

	path = alloc_path_for_send();

	key.objectid = ino2;
	key.type = BTRFS_INODE_REF_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);

		struct extent_buffer *leaf = path->nodes[0];
		int slot = path->slots[0];

		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);

		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid != ino2)
		if (key.type != BTRFS_INODE_REF_KEY &&
		    key.type != BTRFS_INODE_EXTREF_KEY)

		item_size = btrfs_item_size_nr(leaf, slot);
		while (cur_offset < item_size) {

			if (key.type == BTRFS_INODE_EXTREF_KEY) {
				struct btrfs_inode_extref *extref;

				ptr = btrfs_item_ptr_offset(leaf, slot);
				extref = (struct btrfs_inode_extref *)
				parent = btrfs_inode_extref_parent(leaf,
				cur_offset += sizeof(*extref);
				cur_offset += btrfs_inode_extref_name_len(leaf,
			} else {
				parent = key.offset;
				cur_offset = item_size;

			ret = get_inode_info(root, parent, NULL, &parent_gen,
					     NULL, NULL, NULL, NULL);
			ret = check_ino_in_path(root, ino1, ino1_gen,
						parent, parent_gen, fs_path);

	btrfs_free_path(path);
		fs_path_free(fs_path);
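/*
 * Example (illustrative): if ino2 is a regular file with two hard links,
 * "a/x" and "d/e/x", the loop above walks the parent chain of every link
 * location (through the INODE_REF/INODE_EXTREF items), so an ino1 that is
 * the inode of "d" is reported as an ancestor even though the first link
 * does not pass through it.
 */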
static int wait_for_parent_move(struct send_ctx *sctx,
				struct recorded_ref *parent_ref,
				const bool is_orphan)

	u64 ino = parent_ref->dir;
	u64 ino_gen = parent_ref->dir_gen;
	u64 parent_ino_before, parent_ino_after;
	struct fs_path *path_before = NULL;
	struct fs_path *path_after = NULL;

	path_after = fs_path_alloc();
	path_before = fs_path_alloc();
	if (!path_after || !path_before) {

	/*
	 * Our current directory inode may not yet be renamed/moved because some
	 * ancestor (immediate or not) has to be renamed/moved first. So find if
	 * such ancestor exists and make sure our own rename/move happens after
	 * that ancestor is processed to avoid path build infinite loops (done
	 * at get_cur_path()).
	 */
	while (ino > BTRFS_FIRST_FREE_OBJECTID) {
		u64 parent_ino_after_gen;

		if (is_waiting_for_move(sctx, ino)) {
			/*
			 * If the current inode is an ancestor of ino in the
			 * parent root, we need to delay the rename of the
			 * current inode, otherwise don't delay the rename
			 * because we can end up with a circular dependency
			 * of renames, resulting in some directories never
			 * getting the respective rename operations issued in
			 * the send stream or getting into infinite path build
			 * loops.
			 */
			ret = is_ancestor(sctx->parent_root,
					  sctx->cur_ino, sctx->cur_inode_gen,

		fs_path_reset(path_before);
		fs_path_reset(path_after);

		ret = get_first_ref(sctx->send_root, ino, &parent_ino_after,
				    &parent_ino_after_gen, path_after);
		ret = get_first_ref(sctx->parent_root, ino, &parent_ino_before,
		if (ret < 0 && ret != -ENOENT) {
		} else if (ret == -ENOENT) {

		len1 = fs_path_len(path_before);
		len2 = fs_path_len(path_after);
		if (ino > sctx->cur_ino &&
		    (parent_ino_before != parent_ino_after || len1 != len2 ||
		     memcmp(path_before->start, path_after->start, len1))) {

			ret = get_inode_info(sctx->parent_root, ino, NULL,
					     &parent_ino_gen, NULL, NULL, NULL,
			if (ino_gen == parent_ino_gen) {

		ino = parent_ino_after;
		ino_gen = parent_ino_after_gen;

	fs_path_free(path_before);
	fs_path_free(path_after);

		ret = add_pending_dir_move(sctx,
					   sctx->cur_inode_gen,
					   &sctx->deleted_refs,
static int update_ref_path(struct send_ctx *sctx, struct recorded_ref *ref)

	struct fs_path *new_path;

	/*
	 * Our reference's name member points to its full_path member string, so
	 * we use here a new path.
	 */
	new_path = fs_path_alloc();

	ret = get_cur_path(sctx, ref->dir, ref->dir_gen, new_path);
		fs_path_free(new_path);

	ret = fs_path_add(new_path, ref->name, ref->name_len);
		fs_path_free(new_path);

	fs_path_free(ref->full_path);
	set_ref_path(ref, new_path);
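/*
 * Example (illustrative): if an ancestor "a" of the current inode was
 * orphanized after this ref's full_path "a/b/file" had been built, the path
 * is rebuilt from scratch so it now starts with the orphan name of "a"; the
 * old fs_path is freed and set_ref_path() re-derives name/name_len from the
 * new buffer.
 */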
/*
 * This does all the move/link/unlink/rmdir magic.
 */
static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)

	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
	struct recorded_ref *cur;
	struct recorded_ref *cur2;
	struct list_head check_dirs;
	struct fs_path *valid_path = NULL;
	int did_overwrite = 0;
	u64 last_dir_ino_rm = 0;
	bool can_rename = true;
	bool orphanized_dir = false;
	bool orphanized_ancestor = false;

	btrfs_debug(fs_info, "process_recorded_refs %llu", sctx->cur_ino);

	/*
	 * This should never happen as the root dir always has the same ref
	 * which is always '..'
	 */
	BUG_ON(sctx->cur_ino <= BTRFS_FIRST_FREE_OBJECTID);
	INIT_LIST_HEAD(&check_dirs);

	valid_path = fs_path_alloc();

	/*
	 * First, check if the first ref of the current inode was overwritten
	 * before. If yes, we know that the current inode was already orphanized
	 * and thus use the orphan name. If not, we can use get_cur_path to
	 * get the path of the first ref as it would look like while receiving
	 * at this point in time.
	 * New inodes are always orphan at the beginning, so force to use the
	 * orphan name in this case.
	 * The first ref is stored in valid_path and will be updated if it
	 * gets moved around.
	 */
	if (!sctx->cur_inode_new) {
		ret = did_overwrite_first_ref(sctx, sctx->cur_ino,
					      sctx->cur_inode_gen);

	if (sctx->cur_inode_new || did_overwrite) {
		ret = gen_unique_name(sctx, sctx->cur_ino,
				      sctx->cur_inode_gen, valid_path);
	} else {
		ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen,

	list_for_each_entry(cur, &sctx->new_refs, list) {
		/*
		 * We may have refs where the parent directory does not exist
		 * yet. This happens if the parent directories inum is higher
		 * than the current inum. To handle this case, we create the
		 * parent directory out of order. But we need to check if this
		 * did already happen before due to other refs in the same dir.
		 */
		ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
		if (ret == inode_state_will_create) {
			/*
			 * First check if any of the current inodes refs did
			 * already create the dir.
			 */
			list_for_each_entry(cur2, &sctx->new_refs, list) {
				if (cur2->dir == cur->dir) {

			/*
			 * If that did not happen, check if a previous inode
			 * did already create the dir.
			 */
			ret = did_create_dir(sctx, cur->dir);
				ret = send_create_inode(sctx, cur->dir);

		/*
		 * Check if this new ref would overwrite the first ref of
		 * another unprocessed inode. If yes, orphanize the
		 * overwritten inode. If we find an overwritten ref that is
		 * not the first ref, simply unlink it.
		 */
		ret = will_overwrite_ref(sctx, cur->dir, cur->dir_gen,
					 cur->name, cur->name_len,
					 &ow_inode, &ow_gen, &ow_mode);
			ret = is_first_ref(sctx->parent_root,
					   ow_inode, cur->dir, cur->name,

				struct name_cache_entry *nce;
				struct waiting_dir_move *wdm;

				ret = orphanize_inode(sctx, ow_inode, ow_gen,
				if (S_ISDIR(ow_mode))
					orphanized_dir = true;

				/*
				 * If ow_inode has its rename operation delayed
				 * make sure that its orphanized name is used in
				 * the source path when performing its rename
				 * operation.
				 */
				if (is_waiting_for_move(sctx, ow_inode)) {
					wdm = get_waiting_dir_move(sctx,
					wdm->orphanized = true;

				/*
				 * Make sure we clear our orphanized inode's
				 * name from the name cache. This is because the
				 * inode ow_inode might be an ancestor of some
				 * other inode that will be orphanized as well
				 * later and has an inode number greater than
				 * sctx->send_progress. We need to prevent
				 * future name lookups from using the old name
				 * and get instead the orphan name.
				 */
				nce = name_cache_search(sctx, ow_inode, ow_gen);
					name_cache_delete(sctx, nce);

				/*
				 * ow_inode might currently be an ancestor of
				 * cur_ino, therefore compute valid_path (the
				 * current path of cur_ino) again because it
				 * might contain the pre-orphanization name of
				 * ow_inode, which is no longer valid.
				 */
				ret = is_ancestor(sctx->parent_root,
						  sctx->cur_ino, NULL);
					orphanized_ancestor = true;
					fs_path_reset(valid_path);
					ret = get_cur_path(sctx, sctx->cur_ino,
							   sctx->cur_inode_gen,
			} else {
				ret = send_unlink(sctx, cur->full_path);

		if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root) {
			ret = wait_for_dest_dir_move(sctx, cur, is_orphan);

		if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root &&
			ret = wait_for_parent_move(sctx, cur, is_orphan);

		/*
		 * link/move the ref to the new place. If we have an orphan
		 * inode, move it and update valid_path. If not, link or move
		 * it depending on the inode mode.
		 */
		if (is_orphan && can_rename) {
			ret = send_rename(sctx, valid_path, cur->full_path);
			ret = fs_path_copy(valid_path, cur->full_path);
		} else if (can_rename) {
			if (S_ISDIR(sctx->cur_inode_mode)) {
				/*
				 * Dirs can't be linked, so move it. For moved
				 * dirs, we always have one new and one deleted
				 * ref. The deleted ref is ignored later.
				 */
				ret = send_rename(sctx, valid_path,
				ret = fs_path_copy(valid_path,
			} else {
				/*
				 * We might have previously orphanized an inode
				 * which is an ancestor of our current inode,
				 * so our reference's full path, which was
				 * computed before any such orphanizations, must
				 * be updated.
				 */
				if (orphanized_dir) {
					ret = update_ref_path(sctx, cur);
				ret = send_link(sctx, cur->full_path,

		ret = dup_ref(cur, &check_dirs);

	if (S_ISDIR(sctx->cur_inode_mode) && sctx->cur_inode_deleted) {
		/*
		 * Check if we can already rmdir the directory. If not,
		 * orphanize it. For every dir item inside that gets deleted
		 * later, we do this check again and rmdir it then if possible.
		 * See the use of check_dirs for more details.
		 */
		ret = can_rmdir(sctx, sctx->cur_ino, sctx->cur_inode_gen,
			ret = send_rmdir(sctx, valid_path);
		} else if (!is_orphan) {
			ret = orphanize_inode(sctx, sctx->cur_ino,
					      sctx->cur_inode_gen, valid_path);

		list_for_each_entry(cur, &sctx->deleted_refs, list) {
			ret = dup_ref(cur, &check_dirs);
	} else if (S_ISDIR(sctx->cur_inode_mode) &&
		   !list_empty(&sctx->deleted_refs)) {
		/*
		 * We have a moved dir. Add the old parent to check_dirs
		 */
		cur = list_entry(sctx->deleted_refs.next, struct recorded_ref,
		ret = dup_ref(cur, &check_dirs);
	} else if (!S_ISDIR(sctx->cur_inode_mode)) {
		/*
		 * We have a non dir inode. Go through all deleted refs and
		 * unlink them if they were not already overwritten by other
		 * inodes.
		 */
		list_for_each_entry(cur, &sctx->deleted_refs, list) {
			ret = did_overwrite_ref(sctx, cur->dir, cur->dir_gen,
						sctx->cur_ino, sctx->cur_inode_gen,
						cur->name, cur->name_len);
				/*
				 * If we orphanized any ancestor before, we need
				 * to recompute the full path for deleted names,
				 * since any such path was computed before we
				 * processed any references and orphanized any
				 * ancestor inode.
				 */
				if (orphanized_ancestor) {
					ret = update_ref_path(sctx, cur);
				ret = send_unlink(sctx, cur->full_path);
			ret = dup_ref(cur, &check_dirs);

		/*
		 * If the inode is still orphan, unlink the orphan. This may
		 * happen when a previous inode did overwrite the first ref
		 * of this inode and no new refs were added for the current
		 * inode. Unlinking does not mean that the inode is deleted in
		 * all cases. There may still be links to this inode in other
		 * places.
		 */
			ret = send_unlink(sctx, valid_path);

	/*
	 * We did collect all parent dirs where cur_inode was once located. We
	 * now go through all these dirs and check if they are pending for
	 * deletion and if it's finally possible to perform the rmdir now.
	 * We also update the inode stats of the parent dirs here.
	 */
	list_for_each_entry(cur, &check_dirs, list) {
		/*
		 * In case we had refs into dirs that were not processed yet,
		 * we don't need to do the utime and rmdir logic for these dirs.
		 * The dir will be processed later.
		 */
		if (cur->dir > sctx->cur_ino)

		ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
		if (ret == inode_state_did_create ||
		    ret == inode_state_no_change) {
			/* TODO delayed utimes */
			ret = send_utimes(sctx, cur->dir, cur->dir_gen);
		} else if (ret == inode_state_did_delete &&
			   cur->dir != last_dir_ino_rm) {
			ret = can_rmdir(sctx, cur->dir, cur->dir_gen,
				ret = get_cur_path(sctx, cur->dir,
						   cur->dir_gen, valid_path);
				ret = send_rmdir(sctx, valid_path);
				last_dir_ino_rm = cur->dir;

	__free_recorded_refs(&check_dirs);
	free_recorded_refs(sctx);
	fs_path_free(valid_path);
static int record_ref(struct btrfs_root *root, u64 dir, struct fs_path *name,
		      void *ctx, struct list_head *refs)

	struct send_ctx *sctx = ctx;

	p = fs_path_alloc();

	ret = get_inode_info(root, dir, NULL, &gen, NULL, NULL,

	ret = get_cur_path(sctx, dir, gen, p);

	ret = fs_path_add_path(p, name);

	ret = __record_ref(refs, dir, gen, p);

static int __record_new_ref(int num, u64 dir, int index,
			    struct fs_path *name,

	struct send_ctx *sctx = ctx;
	return record_ref(sctx->send_root, dir, name, ctx, &sctx->new_refs);

static int __record_deleted_ref(int num, u64 dir, int index,
				struct fs_path *name,

	struct send_ctx *sctx = ctx;
	return record_ref(sctx->parent_root, dir, name, ctx,
			  &sctx->deleted_refs);

static int record_new_ref(struct send_ctx *sctx)

	ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
				sctx->cmp_key, 0, __record_new_ref, sctx);

static int record_deleted_ref(struct send_ctx *sctx)

	ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
				sctx->cmp_key, 0, __record_deleted_ref, sctx);
struct find_ref_ctx {
	struct btrfs_root *root;
	struct fs_path *name;

static int __find_iref(int num, u64 dir, int index,
		       struct fs_path *name,

	struct find_ref_ctx *ctx = ctx_;

	if (dir == ctx->dir && fs_path_len(name) == fs_path_len(ctx->name) &&
	    strncmp(name->start, ctx->name->start, fs_path_len(name)) == 0) {
		/*
		 * To avoid doing extra lookups we'll only do this if everything
		 */
		ret = get_inode_info(ctx->root, dir, NULL, &dir_gen, NULL,
		if (dir_gen != ctx->dir_gen)
		ctx->found_idx = num;

static int find_iref(struct btrfs_root *root,
		     struct btrfs_path *path,
		     struct btrfs_key *key,
		     u64 dir, u64 dir_gen, struct fs_path *name)

	struct find_ref_ctx ctx;

	ctx.dir_gen = dir_gen;

	ret = iterate_inode_ref(root, path, key, 0, __find_iref, &ctx);

	if (ctx.found_idx == -1)

	return ctx.found_idx;
static int __record_changed_new_ref(int num, u64 dir, int index,
				    struct fs_path *name,

	struct send_ctx *sctx = ctx;

	ret = get_inode_info(sctx->send_root, dir, NULL, &dir_gen, NULL,

	ret = find_iref(sctx->parent_root, sctx->right_path,
			sctx->cmp_key, dir, dir_gen, name);
		ret = __record_new_ref(num, dir, index, name, sctx);

static int __record_changed_deleted_ref(int num, u64 dir, int index,
					struct fs_path *name,

	struct send_ctx *sctx = ctx;

	ret = get_inode_info(sctx->parent_root, dir, NULL, &dir_gen, NULL,

	ret = find_iref(sctx->send_root, sctx->left_path, sctx->cmp_key,
			dir, dir_gen, name);
		ret = __record_deleted_ref(num, dir, index, name, sctx);

static int record_changed_ref(struct send_ctx *sctx)

	ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
				sctx->cmp_key, 0, __record_changed_new_ref, sctx);

	ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
				sctx->cmp_key, 0, __record_changed_deleted_ref, sctx);
/*
 * Record and process all refs at once. Needed when an inode changes the
 * generation number, which means that it was deleted and recreated.
 */
static int process_all_refs(struct send_ctx *sctx,
			    enum btrfs_compare_tree_result cmd)

	struct btrfs_root *root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	iterate_inode_ref_t cb;
	int pending_move = 0;

	path = alloc_path_for_send();

	if (cmd == BTRFS_COMPARE_TREE_NEW) {
		root = sctx->send_root;
		cb = __record_new_ref;
	} else if (cmd == BTRFS_COMPARE_TREE_DELETED) {
		root = sctx->parent_root;
		cb = __record_deleted_ref;
	} else {
		btrfs_err(sctx->send_root->fs_info,
			  "Wrong command %d in process_all_refs", cmd);

	key.objectid = sctx->cmp_key->objectid;
	key.type = BTRFS_INODE_REF_KEY;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);

		eb = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(eb)) {
			ret = btrfs_next_leaf(root, path);

		btrfs_item_key_to_cpu(eb, &found_key, slot);

		if (found_key.objectid != key.objectid ||
		    (found_key.type != BTRFS_INODE_REF_KEY &&
		     found_key.type != BTRFS_INODE_EXTREF_KEY))

		ret = iterate_inode_ref(root, path, &found_key, 0, cb, sctx);

	btrfs_release_path(path);

	/*
	 * We don't actually care about pending_move as we are simply
	 * re-creating this inode and will be rename'ing it into place once we
	 * rename the parent directory.
	 */
	ret = process_recorded_refs(sctx, &pending_move);

	btrfs_free_path(path);
static int send_set_xattr(struct send_ctx *sctx,
			  struct fs_path *path,
			  const char *name, int name_len,
			  const char *data, int data_len)

	ret = begin_cmd(sctx, BTRFS_SEND_C_SET_XATTR);

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
	TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);
	TLV_PUT(sctx, BTRFS_SEND_A_XATTR_DATA, data, data_len);

	ret = send_cmd(sctx);

static int send_remove_xattr(struct send_ctx *sctx,
			     struct fs_path *path,
			     const char *name, int name_len)

	ret = begin_cmd(sctx, BTRFS_SEND_C_REMOVE_XATTR);

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
	TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);

	ret = send_cmd(sctx);
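/*
 * Stream-level example (illustrative): an xattr that changed between the
 * parent and send snapshots results in a BTRFS_SEND_C_SET_XATTR command
 * carrying the path, the xattr name and the new data, while an xattr present
 * only in the parent snapshot results in a BTRFS_SEND_C_REMOVE_XATTR command
 * carrying just the path and the name.
 */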
static int __process_new_xattr(int num, struct btrfs_key *di_key,
			       const char *name, int name_len,
			       const char *data, int data_len,

	struct send_ctx *sctx = ctx;
	struct posix_acl_xattr_header dummy_acl;

	p = fs_path_alloc();

	/*
	 * This hack is needed because empty acls are stored as zero byte
	 * data in xattrs. Problem with that is, that receiving these zero byte
	 * acls will fail later. To fix this, we send a dummy acl list that
	 * only contains the version number and no entries.
	 */
	if (!strncmp(name, XATTR_NAME_POSIX_ACL_ACCESS, name_len) ||
	    !strncmp(name, XATTR_NAME_POSIX_ACL_DEFAULT, name_len)) {
		if (data_len == 0) {
			dummy_acl.a_version =
					cpu_to_le32(POSIX_ACL_XATTR_VERSION);
			data = (char *)&dummy_acl;
			data_len = sizeof(dummy_acl);

	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);

	ret = send_set_xattr(sctx, p, name, name_len, data, data_len);

static int __process_deleted_xattr(int num, struct btrfs_key *di_key,
				   const char *name, int name_len,
				   const char *data, int data_len,

	struct send_ctx *sctx = ctx;

	p = fs_path_alloc();

	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);

	ret = send_remove_xattr(sctx, p, name, name_len);

static int process_new_xattr(struct send_ctx *sctx)

	ret = iterate_dir_item(sctx->send_root, sctx->left_path,
			       __process_new_xattr, sctx);

static int process_deleted_xattr(struct send_ctx *sctx)
{
	return iterate_dir_item(sctx->parent_root, sctx->right_path,
				__process_deleted_xattr, sctx);
}
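/*
 * Example for the empty ACL hack above (illustrative): a zero length
 * "system.posix_acl_access" xattr is sent as a 4 byte payload containing
 * only the little-endian POSIX_ACL_XATTR_VERSION header, which the receiver
 * can set again, whereas a truly empty payload would be rejected on the
 * receiving side.
 */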
struct find_xattr_ctx {

static int __find_xattr(int num, struct btrfs_key *di_key,
			const char *name, int name_len,
			const char *data, int data_len,
			u8 type, void *vctx)

	struct find_xattr_ctx *ctx = vctx;

	if (name_len == ctx->name_len &&
	    strncmp(name, ctx->name, name_len) == 0) {
		ctx->found_idx = num;
		ctx->found_data_len = data_len;
		ctx->found_data = kmemdup(data, data_len, GFP_KERNEL);
		if (!ctx->found_data)

static int find_xattr(struct btrfs_root *root,
		      struct btrfs_path *path,
		      struct btrfs_key *key,
		      const char *name, int name_len,
		      char **data, int *data_len)

	struct find_xattr_ctx ctx;

	ctx.name_len = name_len;
	ctx.found_data = NULL;
	ctx.found_data_len = 0;

	ret = iterate_dir_item(root, path, __find_xattr, &ctx);

	if (ctx.found_idx == -1)

		*data = ctx.found_data;
		*data_len = ctx.found_data_len;
		kfree(ctx.found_data);

	return ctx.found_idx;
static int __process_changed_new_xattr(int num, struct btrfs_key *di_key,
				       const char *name, int name_len,
				       const char *data, int data_len,

	struct send_ctx *sctx = ctx;
	char *found_data = NULL;
	int found_data_len = 0;

	ret = find_xattr(sctx->parent_root, sctx->right_path,
			 sctx->cmp_key, name, name_len, &found_data,
	if (ret == -ENOENT) {
		ret = __process_new_xattr(num, di_key, name, name_len, data,
					  data_len, type, ctx);
	} else if (ret >= 0) {
		if (data_len != found_data_len ||
		    memcmp(data, found_data, data_len)) {
			ret = __process_new_xattr(num, di_key, name, name_len,
						  data, data_len, type, ctx);

static int __process_changed_deleted_xattr(int num, struct btrfs_key *di_key,
					   const char *name, int name_len,
					   const char *data, int data_len,

	struct send_ctx *sctx = ctx;

	ret = find_xattr(sctx->send_root, sctx->left_path, sctx->cmp_key,
			 name, name_len, NULL, NULL);
		ret = __process_deleted_xattr(num, di_key, name, name_len, data,
					      data_len, type, ctx);

static int process_changed_xattr(struct send_ctx *sctx)

	ret = iterate_dir_item(sctx->send_root, sctx->left_path,
			       __process_changed_new_xattr, sctx);

	ret = iterate_dir_item(sctx->parent_root, sctx->right_path,
			       __process_changed_deleted_xattr, sctx);
static int process_all_new_xattrs(struct send_ctx *sctx)

	struct btrfs_root *root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *eb;

	path = alloc_path_for_send();

	root = sctx->send_root;

	key.objectid = sctx->cmp_key->objectid;
	key.type = BTRFS_XATTR_ITEM_KEY;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);

		eb = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(eb)) {
			ret = btrfs_next_leaf(root, path);
			} else if (ret > 0) {

		btrfs_item_key_to_cpu(eb, &found_key, slot);
		if (found_key.objectid != key.objectid ||
		    found_key.type != key.type) {

		ret = iterate_dir_item(root, path, __process_new_xattr, sctx);

	btrfs_free_path(path);
static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len)

	struct btrfs_root *root = sctx->send_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct inode *inode;
	struct btrfs_key key;
	pgoff_t index = offset >> PAGE_SHIFT;
	unsigned pg_offset = offset & ~PAGE_MASK;

	key.objectid = sctx->cur_ino;
	key.type = BTRFS_INODE_ITEM_KEY;

	inode = btrfs_iget(fs_info->sb, &key, root, NULL);
		return PTR_ERR(inode);

	if (offset + len > i_size_read(inode)) {
		if (offset > i_size_read(inode))
		else
			len = i_size_read(inode) - offset;

	last_index = (offset + len - 1) >> PAGE_SHIFT;

	/* initial readahead */
	memset(&sctx->ra, 0, sizeof(struct file_ra_state));
	file_ra_state_init(&sctx->ra, inode->i_mapping);

	while (index <= last_index) {
		unsigned cur_len = min_t(unsigned, len,
					 PAGE_SIZE - pg_offset);

		page = find_lock_page(inode->i_mapping, index);
			page_cache_sync_readahead(inode->i_mapping, &sctx->ra,
						  NULL, index, last_index + 1 - index);

			page = find_or_create_page(inode->i_mapping, index,

		if (PageReadahead(page)) {
			page_cache_async_readahead(inode->i_mapping, &sctx->ra,
						   NULL, page, index, last_index + 1 - index);

		if (!PageUptodate(page)) {
			btrfs_readpage(NULL, page);
			if (!PageUptodate(page)) {

		memcpy(sctx->read_buf + ret, addr + pg_offset, cur_len);
/*
 * Read some bytes from the current inode/file and send a write command to
 * user space.
 */
static int send_write(struct send_ctx *sctx, u64 offset, u32 len)

	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
	ssize_t num_read = 0;

	p = fs_path_alloc();

	btrfs_debug(fs_info, "send_write offset=%llu, len=%d", offset, len);

	num_read = fill_read_buf(sctx, offset, len);
	if (num_read <= 0) {

	ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);

	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
	TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, num_read);

	ret = send_cmd(sctx);
/*
 * Send a clone command to user space.
 */
static int send_clone(struct send_ctx *sctx,
		      u64 offset, u32 len,
		      struct clone_root *clone_root)

	btrfs_debug(sctx->send_root->fs_info,
		    "send_clone offset=%llu, len=%d, clone_root=%llu, clone_inode=%llu, clone_offset=%llu",
		    offset, len, clone_root->root->objectid, clone_root->ino,
		    clone_root->offset);

	p = fs_path_alloc();

	ret = begin_cmd(sctx, BTRFS_SEND_C_CLONE);

	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);

	TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_LEN, len);
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);

	if (clone_root->root == sctx->send_root) {
		ret = get_inode_info(sctx->send_root, clone_root->ino, NULL,
				     &gen, NULL, NULL, NULL, NULL);
		ret = get_cur_path(sctx, clone_root->ino, gen, p);
	} else {
		ret = get_inode_path(clone_root->root, clone_root->ino, p);

	/*
	 * If the parent we're using has a received_uuid set then use that as
	 * our clone source as that is what we will look for when doing a
	 * receive.
	 *
	 * This covers the case that we create a snapshot off of a received
	 * subvolume and then use that as the parent and try to receive on a
	 * different host.
	 */
	if (!btrfs_is_empty_uuid(clone_root->root->root_item.received_uuid))
		TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
			     clone_root->root->root_item.received_uuid);
	else
		TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
			     clone_root->root->root_item.uuid);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
		    le64_to_cpu(clone_root->root->root_item.ctransid));
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_CLONE_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_OFFSET,
		    clone_root->offset);

	ret = send_cmd(sctx);
/*
 * Send an update extent command to user space.
 */
static int send_update_extent(struct send_ctx *sctx,
			      u64 offset, u32 len)

	p = fs_path_alloc();

	ret = begin_cmd(sctx, BTRFS_SEND_C_UPDATE_EXTENT);

	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, len);

	ret = send_cmd(sctx);
static int send_hole(struct send_ctx *sctx, u64 end)

	struct fs_path *p = NULL;
	u64 offset = sctx->cur_inode_last_extent;

	if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA)
		return send_update_extent(sctx, offset, end - offset);

	p = fs_path_alloc();

	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
		goto tlv_put_failure;
	memset(sctx->read_buf, 0, BTRFS_SEND_READ_SIZE);
	while (offset < end) {
		len = min_t(u64, end - offset, BTRFS_SEND_READ_SIZE);

		ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
		TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
		TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
		TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, len);
		ret = send_cmd(sctx);

	sctx->cur_inode_next_write_offset = offset;
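/*
 * Example (illustrative): with a hole covering [64K, 192K) and no
 * NO_FILE_DATA flag, send_hole() emits a series of BTRFS_SEND_C_WRITE
 * commands whose payload is the zeroed read_buf, each at most
 * BTRFS_SEND_READ_SIZE bytes, advancing the file offset until the end of
 * the hole is reached.
 */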
static int send_extent_data(struct send_ctx *sctx,

	if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA)
		return send_update_extent(sctx, offset, len);

	while (sent < len) {
		u64 size = len - sent;

		if (size > BTRFS_SEND_READ_SIZE)
			size = BTRFS_SEND_READ_SIZE;
		ret = send_write(sctx, offset + sent, size);
static int clone_range(struct send_ctx *sctx,
		       struct clone_root *clone_root,
		       const u64 disk_byte,

	struct btrfs_path *path;
	struct btrfs_key key;

	/*
	 * Prevent cloning from a zero offset with a length matching the sector
	 * size because in some scenarios this will make the receiver fail.
	 *
	 * For example, if in the source filesystem the extent at offset 0
	 * has a length of sectorsize and it was written using direct IO, then
	 * it can never be an inline extent (even if compression is enabled).
	 * Then this extent can be cloned in the original filesystem to a non
	 * zero file offset, but it may not be possible to clone in the
	 * destination filesystem because it can be inlined due to compression
	 * on the destination filesystem (as the receiver's write operations are
	 * always done using buffered IO). The same happens when the original
	 * filesystem does not have compression enabled but the destination
	 * filesystem has.
	 */
	if (clone_root->offset == 0 &&
	    len == sctx->send_root->fs_info->sectorsize)
		return send_extent_data(sctx, offset, len);

	path = alloc_path_for_send();

	/*
	 * We can't send a clone operation for the entire range if we find
	 * extent items in the respective range in the source file that
	 * refer to different extents or if we find holes.
	 * So check for that and do a mix of clone and regular write/copy
	 * operations if needed.
	 *
	 * mkfs.btrfs -f /dev/sda
	 * mount /dev/sda /mnt
	 * xfs_io -f -c "pwrite -S 0xaa 0K 100K" /mnt/foo
	 * cp --reflink=always /mnt/foo /mnt/bar
	 * xfs_io -c "pwrite -S 0xbb 50K 50K" /mnt/foo
	 * btrfs subvolume snapshot -r /mnt /mnt/snap
	 *
	 * If when we send the snapshot and we are processing file bar (which
	 * has a higher inode number than foo) we blindly send a clone operation
	 * for the [0, 100K[ range from foo to bar, the receiver ends up getting
	 * a file bar that matches the content of file foo - iow, doesn't match
	 * the content from bar in the original filesystem.
	 */
	key.objectid = clone_root->ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = clone_root->offset;
	ret = btrfs_search_slot(NULL, clone_root->root, &key, path, 0, 0);
	if (ret > 0 && path->slots[0] > 0) {
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1);
		if (key.objectid == clone_root->ino &&
		    key.type == BTRFS_EXTENT_DATA_KEY)

		struct extent_buffer *leaf = path->nodes[0];
		int slot = path->slots[0];
		struct btrfs_file_extent_item *ei;

		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(clone_root->root, path);

		btrfs_item_key_to_cpu(leaf, &key, slot);

		/*
		 * We might have an implicit trailing hole (NO_HOLES feature
		 * enabled). We deal with it after leaving this loop.
		 */
		if (key.objectid != clone_root->ino ||
		    key.type != BTRFS_EXTENT_DATA_KEY)

		ei = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
		type = btrfs_file_extent_type(leaf, ei);
		if (type == BTRFS_FILE_EXTENT_INLINE) {
			ext_len = btrfs_file_extent_inline_len(leaf, slot, ei);
			ext_len = PAGE_ALIGN(ext_len);
		} else {
			ext_len = btrfs_file_extent_num_bytes(leaf, ei);

		if (key.offset + ext_len <= clone_root->offset)

		if (key.offset > clone_root->offset) {
			/* Implicit hole, NO_HOLES feature enabled. */
			u64 hole_len = key.offset - clone_root->offset;

			ret = send_extent_data(sctx, offset, hole_len);

			clone_root->offset += hole_len;
			data_offset += hole_len;

		if (key.offset >= clone_root->offset + len)

		clone_len = min_t(u64, ext_len, len);

		if (btrfs_file_extent_disk_bytenr(leaf, ei) == disk_byte &&
		    btrfs_file_extent_offset(leaf, ei) == data_offset)
			ret = send_clone(sctx, offset, clone_len, clone_root);
		else
			ret = send_extent_data(sctx, offset, clone_len);

		offset += clone_len;
		clone_root->offset += clone_len;
		data_offset += clone_len;

		ret = send_extent_data(sctx, offset, len);

	btrfs_free_path(path);
static int send_write_or_clone(struct send_ctx *sctx,
			       struct btrfs_path *path,
			       struct btrfs_key *key,
			       struct clone_root *clone_root)

	struct btrfs_file_extent_item *ei;
	u64 offset = key->offset;
	u64 bs = sctx->send_root->fs_info->sb->s_blocksize;

	ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
			    struct btrfs_file_extent_item);
	type = btrfs_file_extent_type(path->nodes[0], ei);
	if (type == BTRFS_FILE_EXTENT_INLINE) {
		len = btrfs_file_extent_inline_len(path->nodes[0],
						   path->slots[0], ei);
		/*
		 * it is possible the inline item won't cover the whole page,
		 * but there may be items after this page. Make
		 * sure to send the whole thing
		 */
		len = PAGE_ALIGN(len);
	} else {
		len = btrfs_file_extent_num_bytes(path->nodes[0], ei);

	if (offset >= sctx->cur_inode_size) {

	if (offset + len > sctx->cur_inode_size)
		len = sctx->cur_inode_size - offset;

	if (clone_root && IS_ALIGNED(offset + len, bs)) {

		disk_byte = btrfs_file_extent_disk_bytenr(path->nodes[0], ei);
		data_offset = btrfs_file_extent_offset(path->nodes[0], ei);
		ret = clone_range(sctx, clone_root, disk_byte, data_offset,
	} else {
		ret = send_extent_data(sctx, offset, len);
	}
	sctx->cur_inode_next_write_offset = offset + len;
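/*
 * Example (illustrative): an extent whose end offset is aligned to the block
 * size and for which a clone source was found is forwarded to clone_range();
 * an extent ending unaligned (typically the last extent of a file whose size
 * is not a multiple of the block size) falls back to send_extent_data(),
 * i.e. plain write commands.
 */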
static int is_extent_unchanged(struct send_ctx *sctx,
			       struct btrfs_path *left_path,
			       struct btrfs_key *ekey)

	struct btrfs_key key;
	struct btrfs_path *path = NULL;
	struct extent_buffer *eb;
	struct btrfs_key found_key;
	struct btrfs_file_extent_item *ei;
	u64 left_offset_fixed;

	path = alloc_path_for_send();

	eb = left_path->nodes[0];
	slot = left_path->slots[0];
	ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
	left_type = btrfs_file_extent_type(eb, ei);

	if (left_type != BTRFS_FILE_EXTENT_REG) {

	left_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
	left_len = btrfs_file_extent_num_bytes(eb, ei);
	left_offset = btrfs_file_extent_offset(eb, ei);
	left_gen = btrfs_file_extent_generation(eb, ei);

	/*
	 * Following comments will refer to these graphics. L is the left
	 * extents which we are checking at the moment. 1-8 are the right
	 * extents that we iterate.
	 *
	 * |-1-|-2a-|-3-|-4-|-5-|-6-|
	 *
	 * |--1--|-2b-|...(same as above)
	 *
	 * Alternative situation. Happens on files where extents got split.
	 *
	 * |-----------7-----------|-6-|
	 *
	 * Alternative situation. Happens on files which got larger.
	 *
	 * Nothing follows after 8.
	 */
	key.objectid = ekey->objectid;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = ekey->offset;
	ret = btrfs_search_slot_for_read(sctx->parent_root, &key, path, 0, 0);

	/*
	 * Handle special case where the right side has no extents at all.
	 */
	eb = path->nodes[0];
	slot = path->slots[0];
	btrfs_item_key_to_cpu(eb, &found_key, slot);
	if (found_key.objectid != key.objectid ||
	    found_key.type != key.type) {
		/* If we're a hole then just pretend nothing changed */
		ret = (left_disknr) ? 0 : 1;

	/*
	 * We're now on 2a, 2b or 7.
	 */
	while (key.offset < ekey->offset + left_len) {
		ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		right_type = btrfs_file_extent_type(eb, ei);
		if (right_type != BTRFS_FILE_EXTENT_REG &&
		    right_type != BTRFS_FILE_EXTENT_INLINE) {

		if (right_type == BTRFS_FILE_EXTENT_INLINE) {
			right_len = btrfs_file_extent_inline_len(eb, slot, ei);
			right_len = PAGE_ALIGN(right_len);
		} else {
			right_len = btrfs_file_extent_num_bytes(eb, ei);

		/*
		 * Are we at extent 8? If yes, we know the extent is changed.
		 * This may only happen on the first iteration.
		 */
		if (found_key.offset + right_len <= ekey->offset) {
			/* If we're a hole just pretend nothing changed */
			ret = (left_disknr) ? 0 : 1;

		/*
		 * We just wanted to see if when we have an inline extent, what
		 * follows it is a regular extent (wanted to check the above
		 * condition for inline extents too). This should normally not
		 * happen but it's possible for example when we have an inline
		 * compressed extent representing data with a size matching
		 * the page size (currently the same as sector size).
		 */
		if (right_type == BTRFS_FILE_EXTENT_INLINE) {

		right_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
		right_offset = btrfs_file_extent_offset(eb, ei);
		right_gen = btrfs_file_extent_generation(eb, ei);

		left_offset_fixed = left_offset;
		if (key.offset < ekey->offset) {
			/* Fix the right offset for 2a and 7. */
			right_offset += ekey->offset - key.offset;
		} else {
			/* Fix the left offset for all behind 2a and 2b */
			left_offset_fixed += key.offset - ekey->offset;

		/*
		 * Check if we have the same extent.
		 */
		if (left_disknr != right_disknr ||
		    left_offset_fixed != right_offset ||
		    left_gen != right_gen) {

		/*
		 * Go to the next extent.
		 */
		ret = btrfs_next_item(sctx->parent_root, path);
			eb = path->nodes[0];
			slot = path->slots[0];
			btrfs_item_key_to_cpu(eb, &found_key, slot);
		if (ret || found_key.objectid != key.objectid ||
		    found_key.type != key.type) {
			key.offset += right_len;
		if (found_key.offset != key.offset + right_len) {

	/*
	 * We're now behind the left extent (treat as unchanged) or at the end
	 * of the right side (treat as changed).
	 */
	if (key.offset >= ekey->offset + left_len)

	btrfs_free_path(path);
static int get_last_extent(struct send_ctx *sctx, u64 offset)

	struct btrfs_path *path;
	struct btrfs_root *root = sctx->send_root;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;

	path = alloc_path_for_send();

	sctx->cur_inode_last_extent = 0;

	key.objectid = sctx->cur_ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = offset;
	ret = btrfs_search_slot_for_read(root, &key, path, 0, 1);

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
	if (key.objectid != sctx->cur_ino || key.type != BTRFS_EXTENT_DATA_KEY)

	fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
			    struct btrfs_file_extent_item);
	type = btrfs_file_extent_type(path->nodes[0], fi);
	if (type == BTRFS_FILE_EXTENT_INLINE) {
		u64 size = btrfs_file_extent_inline_len(path->nodes[0],
							path->slots[0], fi);
		extent_end = ALIGN(key.offset + size,
				   sctx->send_root->fs_info->sectorsize);
	} else {
		extent_end = key.offset +
			btrfs_file_extent_num_bytes(path->nodes[0], fi);

	sctx->cur_inode_last_extent = extent_end;

	btrfs_free_path(path);
static int range_is_hole_in_parent(struct send_ctx *sctx,

	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_root *root = sctx->parent_root;
	u64 search_start = start;

	path = alloc_path_for_send();

	key.objectid = sctx->cur_ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = search_start;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret > 0 && path->slots[0] > 0)

	while (search_start < end) {
		struct extent_buffer *leaf = path->nodes[0];
		int slot = path->slots[0];
		struct btrfs_file_extent_item *fi;

		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);

		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid < sctx->cur_ino ||
		    key.type < BTRFS_EXTENT_DATA_KEY)
		if (key.objectid > sctx->cur_ino ||
		    key.type > BTRFS_EXTENT_DATA_KEY ||

		fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
		if (btrfs_file_extent_type(leaf, fi) ==
		    BTRFS_FILE_EXTENT_INLINE) {
			u64 size = btrfs_file_extent_inline_len(leaf, slot, fi);

			extent_end = ALIGN(key.offset + size,
					   root->fs_info->sectorsize);
		} else {
			extent_end = key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);

		if (extent_end <= start)
		if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0) {
			search_start = extent_end;

	btrfs_free_path(path);
static int maybe_send_hole(struct send_ctx *sctx, struct btrfs_path *path,
			   struct btrfs_key *key)

	struct btrfs_file_extent_item *fi;

	if (sctx->cur_ino != key->objectid || !need_send_hole(sctx))

	if (sctx->cur_inode_last_extent == (u64)-1) {
		ret = get_last_extent(sctx, key->offset - 1);

	fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
			    struct btrfs_file_extent_item);
	type = btrfs_file_extent_type(path->nodes[0], fi);
	if (type == BTRFS_FILE_EXTENT_INLINE) {
		u64 size = btrfs_file_extent_inline_len(path->nodes[0],
							path->slots[0], fi);
		extent_end = ALIGN(key->offset + size,
				   sctx->send_root->fs_info->sectorsize);
	} else {
		extent_end = key->offset +
			btrfs_file_extent_num_bytes(path->nodes[0], fi);

	if (path->slots[0] == 0 &&
	    sctx->cur_inode_last_extent < key->offset) {
		/*
		 * We might have skipped entire leafs that contained only
		 * file extent items for our current inode. These leafs have
		 * a generation number smaller (older) than the one in the
		 * current leaf and the leaf our last extent came from, and
		 * are located between these 2 leafs.
		 */
		ret = get_last_extent(sctx, key->offset - 1);

	if (sctx->cur_inode_last_extent < key->offset) {
		ret = range_is_hole_in_parent(sctx,
					      sctx->cur_inode_last_extent,
			ret = send_hole(sctx, key->offset);

	sctx->cur_inode_last_extent = extent_end;
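/*
 * Example (illustrative): if the previous extent of the file ended at 128K
 * and the extent item being processed starts at 256K, and the parent
 * snapshot does not already have a hole over that range, maybe_send_hole()
 * emits zero writes (or an update-extent command) for [128K, 256K) before
 * the new extent is handled.
 */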
static int process_extent(struct send_ctx *sctx,
			  struct btrfs_path *path,
			  struct btrfs_key *key)
{
	struct clone_root *found_clone = NULL;
	int ret = 0;

	if (S_ISLNK(sctx->cur_inode_mode))
		return 0;

	if (sctx->parent_root && !sctx->cur_inode_new) {
		ret = is_extent_unchanged(sctx, path, key);
		if (ret < 0)
			goto out;
		if (ret) {
			ret = 0;
			goto out_hole;
		}
	} else {
		struct btrfs_file_extent_item *ei;
		u8 type;

		ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
				    struct btrfs_file_extent_item);
		type = btrfs_file_extent_type(path->nodes[0], ei);
		if (type == BTRFS_FILE_EXTENT_PREALLOC ||
		    type == BTRFS_FILE_EXTENT_REG) {
			/*
			 * The send spec does not have a prealloc command yet,
			 * so just leave a hole for prealloc'ed extents until
			 * we have enough commands queued up to justify rev'ing
			 * the send spec.
			 */
			if (type == BTRFS_FILE_EXTENT_PREALLOC) {
				ret = 0;
				goto out;
			}

			/* Have a hole, just skip it. */
			if (btrfs_file_extent_disk_bytenr(path->nodes[0], ei) == 0) {
				ret = 0;
				goto out;
			}
		}
	}

	ret = find_extent_clone(sctx, path, key->objectid, key->offset,
			sctx->cur_inode_size, &found_clone);
	if (ret != -ENOENT && ret < 0)
		goto out;

	ret = send_write_or_clone(sctx, path, key, found_clone);
	if (ret)
		goto out;
out_hole:
	ret = maybe_send_hole(sctx, path, key);
out:
	return ret;
}

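/*
 * Process all extent items of the current inode as if they were new, used
 * for full sends and for inodes whose generation changed.
 */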
static int process_all_extents(struct send_ctx *sctx)
{
	int ret;
	struct btrfs_root *root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	int slot;

	root = sctx->send_root;
	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = sctx->cmp_key->objectid;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		eb = path->nodes[0];
		slot = path->slots[0];

		if (slot >= btrfs_header_nritems(eb)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0) {
				goto out;
			} else if (ret > 0) {
				ret = 0;
				break;
			}
			continue;
		}

		btrfs_item_key_to_cpu(eb, &found_key, slot);

		if (found_key.objectid != key.objectid ||
		    found_key.type != key.type) {
			ret = 0;
			goto out;
		}

		ret = process_extent(sctx, path, &found_key);
		if (ret < 0)
			goto out;

		path->slots[0]++;
	}

out:
	btrfs_free_path(path);
	return ret;
}

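/*
 * Process the recorded new/deleted references of the current inode, unless
 * we are still in the middle of its ref items or nothing was recorded.
 */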
static int process_recorded_refs_if_needed(struct send_ctx *sctx, int at_end,
					   int *pending_move,
					   int *refs_processed)
{
	int ret = 0;

	if (sctx->cur_ino == 0)
		goto out;
	if (!at_end && sctx->cur_ino == sctx->cmp_key->objectid &&
	    sctx->cmp_key->type <= BTRFS_INODE_EXTREF_KEY)
		goto out;
	if (list_empty(&sctx->new_refs) && list_empty(&sctx->deleted_refs))
		goto out;

	ret = process_recorded_refs(sctx, pending_move);
	if (ret < 0)
		goto out;

	*refs_processed = 1;
out:
	return ret;
}

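/*
 * Finish off the currently processed inode: process its recorded refs, send
 * any pending truncate, chown, chmod, hole and utimes commands, and apply
 * child directory moves that were waiting on it.
 */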
static int finish_inode_if_needed(struct send_ctx *sctx, int at_end)
{
	int ret = 0;
	u64 left_mode;
	u64 left_uid;
	u64 left_gid;
	u64 right_mode;
	u64 right_uid;
	u64 right_gid;
	int need_chown = 0;
	int need_chmod = 0;
	int need_truncate = 1;
	int pending_move = 0;
	int refs_processed = 0;

	ret = process_recorded_refs_if_needed(sctx, at_end, &pending_move,
					      &refs_processed);
	if (ret < 0)
		goto out;

	/*
	 * We have processed the refs and thus need to advance send_progress.
	 * Now, calls to get_cur_xxx will take the updated refs of the current
	 * inode into account.
	 *
	 * On the other hand, if our current inode is a directory and couldn't
	 * be moved/renamed because its parent was renamed/moved too and it has
	 * a higher inode number, we can only move/rename our current inode
	 * after we moved/renamed its parent. Therefore in this case operate on
	 * the old path (pre move/rename) of our current inode, and the
	 * move/rename will be performed later.
	 */
	if (refs_processed && !pending_move)
		sctx->send_progress = sctx->cur_ino + 1;

	if (sctx->cur_ino == 0 || sctx->cur_inode_deleted)
		goto out;
	if (!at_end && sctx->cmp_key->objectid == sctx->cur_ino)
		goto out;

	ret = get_inode_info(sctx->send_root, sctx->cur_ino, NULL, NULL,
			&left_mode, &left_uid, &left_gid, NULL);
	if (ret < 0)
		goto out;

	if (!sctx->parent_root || sctx->cur_inode_new) {
		need_chown = 1;
		if (!S_ISLNK(sctx->cur_inode_mode))
			need_chmod = 1;
		if (sctx->cur_inode_next_write_offset == sctx->cur_inode_size)
			need_truncate = 0;
	} else {
		u64 old_size;

		ret = get_inode_info(sctx->parent_root, sctx->cur_ino,
				&old_size, NULL, &right_mode, &right_uid,
				&right_gid, NULL);
		if (ret < 0)
			goto out;

		if (left_uid != right_uid || left_gid != right_gid)
			need_chown = 1;
		if (!S_ISLNK(sctx->cur_inode_mode) && left_mode != right_mode)
			need_chmod = 1;
		if ((old_size == sctx->cur_inode_size) ||
		    (sctx->cur_inode_size > old_size &&
		     sctx->cur_inode_next_write_offset == sctx->cur_inode_size))
			need_truncate = 0;
	}

	if (S_ISREG(sctx->cur_inode_mode)) {
		if (need_send_hole(sctx)) {
			if (sctx->cur_inode_last_extent == (u64)-1 ||
			    sctx->cur_inode_last_extent <
			    sctx->cur_inode_size) {
				ret = get_last_extent(sctx, (u64)-1);
				if (ret)
					goto out;
			}
			if (sctx->cur_inode_last_extent <
			    sctx->cur_inode_size) {
				ret = send_hole(sctx, sctx->cur_inode_size);
				if (ret)
					goto out;
			}
		}
		if (need_truncate) {
			ret = send_truncate(sctx, sctx->cur_ino,
					    sctx->cur_inode_gen,
					    sctx->cur_inode_size);
			if (ret < 0)
				goto out;
		}
	}

	if (need_chown) {
		ret = send_chown(sctx, sctx->cur_ino, sctx->cur_inode_gen,
				left_uid, left_gid);
		if (ret < 0)
			goto out;
	}
	if (need_chmod) {
		ret = send_chmod(sctx, sctx->cur_ino, sctx->cur_inode_gen,
				left_mode);
		if (ret < 0)
			goto out;
	}

	/*
	 * If other directory inodes depended on our current directory
	 * inode's move/rename, now do their move/rename operations.
	 */
	if (!is_waiting_for_move(sctx, sctx->cur_ino)) {
		ret = apply_children_dir_moves(sctx);
		if (ret)
			goto out;
		/*
		 * Need to send that every time, no matter if it actually
		 * changed between the two trees as we have done changes to
		 * the inode before. If our inode is a directory and it's
		 * waiting to be moved/renamed, we will send its utimes when
		 * it's moved/renamed, therefore we don't need to do it here.
		 */
		sctx->send_progress = sctx->cur_ino + 1;
		ret = send_utimes(sctx, sctx->cur_ino, sctx->cur_inode_gen);
		if (ret < 0)
			goto out;
	}

out:
	return ret;
}

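/*
 * Handle an inode item reported by the tree comparison: record its size,
 * mode and generation in sctx and detect reuse of the inode number (an
 * inode that was deleted and recreated between the two snapshots).
 */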
static int changed_inode(struct send_ctx *sctx,
			 enum btrfs_compare_tree_result result)
{
	int ret = 0;
	struct btrfs_key *key = sctx->cmp_key;
	struct btrfs_inode_item *left_ii = NULL;
	struct btrfs_inode_item *right_ii = NULL;
	u64 left_gen = 0;
	u64 right_gen = 0;

	sctx->cur_ino = key->objectid;
	sctx->cur_inode_new_gen = 0;
	sctx->cur_inode_last_extent = (u64)-1;
	sctx->cur_inode_next_write_offset = 0;

	/*
	 * Set send_progress to current inode. This will tell all get_cur_xxx
	 * functions that the current inode's refs are not updated yet. Later,
	 * when process_recorded_refs is finished, it is set to cur_ino + 1.
	 */
	sctx->send_progress = sctx->cur_ino;

	if (result == BTRFS_COMPARE_TREE_NEW ||
	    result == BTRFS_COMPARE_TREE_CHANGED) {
		left_ii = btrfs_item_ptr(sctx->left_path->nodes[0],
				sctx->left_path->slots[0],
				struct btrfs_inode_item);
		left_gen = btrfs_inode_generation(sctx->left_path->nodes[0],
				left_ii);
	} else {
		right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
				sctx->right_path->slots[0],
				struct btrfs_inode_item);
		right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
				right_ii);
	}
	if (result == BTRFS_COMPARE_TREE_CHANGED) {
		right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
				sctx->right_path->slots[0],
				struct btrfs_inode_item);

		right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
				right_ii);

		/*
		 * The cur_ino = root dir case is special here. We can't treat
		 * the inode as deleted+reused because it would generate a
		 * stream that tries to delete/mkdir the root dir.
		 */
		if (left_gen != right_gen &&
		    sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
			sctx->cur_inode_new_gen = 1;
	}

	if (result == BTRFS_COMPARE_TREE_NEW) {
		sctx->cur_inode_gen = left_gen;
		sctx->cur_inode_new = 1;
		sctx->cur_inode_deleted = 0;
		sctx->cur_inode_size = btrfs_inode_size(
				sctx->left_path->nodes[0], left_ii);
		sctx->cur_inode_mode = btrfs_inode_mode(
				sctx->left_path->nodes[0], left_ii);
		sctx->cur_inode_rdev = btrfs_inode_rdev(
				sctx->left_path->nodes[0], left_ii);
		if (sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
			ret = send_create_inode_if_needed(sctx);
	} else if (result == BTRFS_COMPARE_TREE_DELETED) {
		sctx->cur_inode_gen = right_gen;
		sctx->cur_inode_new = 0;
		sctx->cur_inode_deleted = 1;
		sctx->cur_inode_size = btrfs_inode_size(
				sctx->right_path->nodes[0], right_ii);
		sctx->cur_inode_mode = btrfs_inode_mode(
				sctx->right_path->nodes[0], right_ii);
	} else if (result == BTRFS_COMPARE_TREE_CHANGED) {
		/*
		 * We need to do some special handling in case the inode was
		 * reported as changed with a changed generation number. This
		 * means that the original inode was deleted and new inode
		 * reused the same inum. So we have to treat the old inode as
		 * deleted and the new one as new.
		 */
		if (sctx->cur_inode_new_gen) {
			/*
			 * First, process the inode as if it was deleted.
			 */
			sctx->cur_inode_gen = right_gen;
			sctx->cur_inode_new = 0;
			sctx->cur_inode_deleted = 1;
			sctx->cur_inode_size = btrfs_inode_size(
					sctx->right_path->nodes[0], right_ii);
			sctx->cur_inode_mode = btrfs_inode_mode(
					sctx->right_path->nodes[0], right_ii);
			ret = process_all_refs(sctx,
					BTRFS_COMPARE_TREE_DELETED);
			if (ret < 0)
				goto out;

			/*
			 * Now process the inode as if it was new.
			 */
			sctx->cur_inode_gen = left_gen;
			sctx->cur_inode_new = 1;
			sctx->cur_inode_deleted = 0;
			sctx->cur_inode_size = btrfs_inode_size(
					sctx->left_path->nodes[0], left_ii);
			sctx->cur_inode_mode = btrfs_inode_mode(
					sctx->left_path->nodes[0], left_ii);
			sctx->cur_inode_rdev = btrfs_inode_rdev(
					sctx->left_path->nodes[0], left_ii);
			ret = send_create_inode_if_needed(sctx);
			if (ret < 0)
				goto out;

			ret = process_all_refs(sctx, BTRFS_COMPARE_TREE_NEW);
			if (ret < 0)
				goto out;
			/*
			 * Advance send_progress now as we did not get into
			 * process_recorded_refs_if_needed in the new_gen case.
			 */
			sctx->send_progress = sctx->cur_ino + 1;

			/*
			 * Now process all extents and xattrs of the inode as if
			 * they were all new.
			 */
			ret = process_all_extents(sctx);
			if (ret < 0)
				goto out;
			ret = process_all_new_xattrs(sctx);
			if (ret < 0)
				goto out;
		} else {
			sctx->cur_inode_gen = left_gen;
			sctx->cur_inode_new = 0;
			sctx->cur_inode_new_gen = 0;
			sctx->cur_inode_deleted = 0;
			sctx->cur_inode_size = btrfs_inode_size(
					sctx->left_path->nodes[0], left_ii);
			sctx->cur_inode_mode = btrfs_inode_mode(
					sctx->left_path->nodes[0], left_ii);
		}
	}

out:
	return ret;
}

/*
 * We have to process new refs before deleted refs, but compare_trees gives us
 * the new and deleted refs mixed. To fix this, we record the new/deleted refs
 * first and later process them in process_recorded_refs.
 * For the cur_inode_new_gen case, we skip recording completely because
 * changed_inode did already initiate processing of refs. The reason for this
 * is that in this case, compare_tree actually compares the refs of 2 different
 * inodes. To fix this, process_all_refs is used in changed_inode to handle all
 * refs of the right tree as deleted and all refs of the left tree as new.
 */
static int changed_ref(struct send_ctx *sctx,
		       enum btrfs_compare_tree_result result)
{
	int ret = 0;

	if (sctx->cur_ino != sctx->cmp_key->objectid) {
		inconsistent_snapshot_error(sctx, result, "reference");
		return -EIO;
	}

	if (!sctx->cur_inode_new_gen &&
	    sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) {
		if (result == BTRFS_COMPARE_TREE_NEW)
			ret = record_new_ref(sctx);
		else if (result == BTRFS_COMPARE_TREE_DELETED)
			ret = record_deleted_ref(sctx);
		else if (result == BTRFS_COMPARE_TREE_CHANGED)
			ret = record_changed_ref(sctx);
	}

	return ret;
}

/*
 * Process new/deleted/changed xattrs. We skip processing in the
 * cur_inode_new_gen case because changed_inode did already initiate processing
 * of xattrs. The reason is the same as in changed_ref.
 */
static int changed_xattr(struct send_ctx *sctx,
			 enum btrfs_compare_tree_result result)
{
	int ret = 0;

	if (sctx->cur_ino != sctx->cmp_key->objectid) {
		inconsistent_snapshot_error(sctx, result, "xattr");
		return -EIO;
	}

	if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
		if (result == BTRFS_COMPARE_TREE_NEW)
			ret = process_new_xattr(sctx);
		else if (result == BTRFS_COMPARE_TREE_DELETED)
			ret = process_deleted_xattr(sctx);
		else if (result == BTRFS_COMPARE_TREE_CHANGED)
			ret = process_changed_xattr(sctx);
	}

	return ret;
}

/*
 * Process new/deleted/changed extents. We skip processing in the
 * cur_inode_new_gen case because changed_inode did already initiate processing
 * of extents. The reason is the same as in changed_ref.
 */
static int changed_extent(struct send_ctx *sctx,
			  enum btrfs_compare_tree_result result)
{
	int ret = 0;

	if (sctx->cur_ino != sctx->cmp_key->objectid) {

		if (result == BTRFS_COMPARE_TREE_CHANGED) {
			struct extent_buffer *leaf_l;
			struct extent_buffer *leaf_r;
			struct btrfs_file_extent_item *ei_l;
			struct btrfs_file_extent_item *ei_r;

			leaf_l = sctx->left_path->nodes[0];
			leaf_r = sctx->right_path->nodes[0];
			ei_l = btrfs_item_ptr(leaf_l,
					      sctx->left_path->slots[0],
					      struct btrfs_file_extent_item);
			ei_r = btrfs_item_ptr(leaf_r,
					      sctx->right_path->slots[0],
					      struct btrfs_file_extent_item);

			/*
			 * We may have found an extent item that has changed
			 * only its disk_bytenr field and the corresponding
			 * inode item was not updated. This case happens due to
			 * very specific timings during relocation when a leaf
			 * that contains file extent items is COWed while
			 * relocation is ongoing and it's in the stage where it
			 * updates data pointers. So when this happens we can
			 * safely ignore it since we know it's the same extent,
			 * but just at different logical and physical locations
			 * (when an extent is fully replaced with a new one, we
			 * know the generation number must have changed too,
			 * since snapshot creation implies committing the current
			 * transaction, and the inode item must have been updated
			 * as well).
			 * This replacement of the disk_bytenr happens at
			 * relocation.c:replace_file_extents() through
			 * relocation.c:btrfs_reloc_cow_block().
			 */
			if (btrfs_file_extent_generation(leaf_l, ei_l) ==
			    btrfs_file_extent_generation(leaf_r, ei_r) &&
			    btrfs_file_extent_ram_bytes(leaf_l, ei_l) ==
			    btrfs_file_extent_ram_bytes(leaf_r, ei_r) &&
			    btrfs_file_extent_compression(leaf_l, ei_l) ==
			    btrfs_file_extent_compression(leaf_r, ei_r) &&
			    btrfs_file_extent_encryption(leaf_l, ei_l) ==
			    btrfs_file_extent_encryption(leaf_r, ei_r) &&
			    btrfs_file_extent_other_encoding(leaf_l, ei_l) ==
			    btrfs_file_extent_other_encoding(leaf_r, ei_r) &&
			    btrfs_file_extent_type(leaf_l, ei_l) ==
			    btrfs_file_extent_type(leaf_r, ei_r) &&
			    btrfs_file_extent_disk_bytenr(leaf_l, ei_l) !=
			    btrfs_file_extent_disk_bytenr(leaf_r, ei_r) &&
			    btrfs_file_extent_disk_num_bytes(leaf_l, ei_l) ==
			    btrfs_file_extent_disk_num_bytes(leaf_r, ei_r) &&
			    btrfs_file_extent_offset(leaf_l, ei_l) ==
			    btrfs_file_extent_offset(leaf_r, ei_r) &&
			    btrfs_file_extent_num_bytes(leaf_l, ei_l) ==
			    btrfs_file_extent_num_bytes(leaf_r, ei_r))
				return 0;
		}

		inconsistent_snapshot_error(sctx, result, "extent");
		return -EIO;
	}

	if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
		if (result != BTRFS_COMPARE_TREE_DELETED)
			ret = process_extent(sctx, sctx->left_path,
					sctx->cmp_key);
	}

	return ret;
}

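/*
 * Check whether the generation of directory @dir differs between the send
 * root and the parent root.
 */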
static int dir_changed(struct send_ctx *sctx, u64 dir)
{
	u64 orig_gen, new_gen;
	int ret;

	ret = get_inode_info(sctx->send_root, dir, NULL, &new_gen, NULL, NULL,
			     NULL, NULL);
	if (ret)
		return ret;

	ret = get_inode_info(sctx->parent_root, dir, NULL, &orig_gen, NULL,
			     NULL, NULL, NULL);
	if (ret)
		return ret;

	return (orig_gen != new_gen) ? 1 : 0;
}

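/*
 * For a ref item that compare_trees reported as unchanged, check whether any
 * of its parent directories changed generation, in which case the ref still
 * has to be processed.
 */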
static int compare_refs(struct send_ctx *sctx, struct btrfs_path *path,
			struct btrfs_key *key)
{
	struct btrfs_inode_extref *extref;
	struct extent_buffer *leaf;
	u64 dirid = 0, last_dirid = 0;
	unsigned long ptr;
	u32 item_size;
	u32 cur_offset = 0;
	int ref_name_len;
	int ret = 0;

	/* Easy case, just check this one dirid */
	if (key->type == BTRFS_INODE_REF_KEY) {
		dirid = key->offset;

		ret = dir_changed(sctx, dirid);
		goto out;
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
	while (cur_offset < item_size) {
		extref = (struct btrfs_inode_extref *)(ptr +
						       cur_offset);
		dirid = btrfs_inode_extref_parent(leaf, extref);
		ref_name_len = btrfs_inode_extref_name_len(leaf, extref);
		cur_offset += ref_name_len + sizeof(*extref);
		if (dirid == last_dirid)
			continue;
		ret = dir_changed(sctx, dirid);
		if (ret)
			break;
		last_dirid = dirid;
	}
out:
	return ret;
}

/*
 * Updates compare related fields in sctx and simply forwards to the actual
 * changed_xxx functions.
 */
static int changed_cb(struct btrfs_path *left_path,
		      struct btrfs_path *right_path,
		      struct btrfs_key *key,
		      enum btrfs_compare_tree_result result,
		      void *ctx)
{
	int ret = 0;
	struct send_ctx *sctx = ctx;

	if (result == BTRFS_COMPARE_TREE_SAME) {
		if (key->type == BTRFS_INODE_REF_KEY ||
		    key->type == BTRFS_INODE_EXTREF_KEY) {
			ret = compare_refs(sctx, left_path, key);
			if (!ret)
				return 0;
			if (ret < 0)
				return ret;
		} else if (key->type == BTRFS_EXTENT_DATA_KEY) {
			return maybe_send_hole(sctx, left_path, key);
		} else {
			return 0;
		}
		result = BTRFS_COMPARE_TREE_CHANGED;
		ret = 0;
	}

	sctx->left_path = left_path;
	sctx->right_path = right_path;
	sctx->cmp_key = key;

	ret = finish_inode_if_needed(sctx, 0);
	if (ret < 0)
		goto out;

	/* Ignore non-FS objects */
	if (key->objectid == BTRFS_FREE_INO_OBJECTID ||
	    key->objectid == BTRFS_FREE_SPACE_OBJECTID)
		goto out;

	if (key->type == BTRFS_INODE_ITEM_KEY)
		ret = changed_inode(sctx, result);
	else if (key->type == BTRFS_INODE_REF_KEY ||
		 key->type == BTRFS_INODE_EXTREF_KEY)
		ret = changed_ref(sctx, result);
	else if (key->type == BTRFS_XATTR_ITEM_KEY)
		ret = changed_xattr(sctx, result);
	else if (key->type == BTRFS_EXTENT_DATA_KEY)
		ret = changed_extent(sctx, result);

out:
	return ret;
}

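/*
 * Full send mode: iterate over all items of the send root and feed them to
 * changed_cb() as if they were all new.
 */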
static int full_send_tree(struct send_ctx *sctx)
{
	int ret;
	struct btrfs_root *send_root = sctx->send_root;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;
	struct extent_buffer *eb;
	int slot;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_FIRST_FREE_OBJECTID;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot_for_read(send_root, &key, path, 1, 0);
	if (ret < 0)
		goto out;
	if (ret)
		goto out_finish;

	while (1) {
		eb = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(eb, &found_key, slot);

		ret = changed_cb(path, NULL, &found_key,
				 BTRFS_COMPARE_TREE_NEW, sctx);
		if (ret < 0)
			goto out;

		key.objectid = found_key.objectid;
		key.type = found_key.type;
		key.offset = found_key.offset + 1;

		ret = btrfs_next_item(send_root, path);
		if (ret < 0)
			goto out;
		if (ret) {
			ret = 0;
			break;
		}
	}

out_finish:
	ret = finish_inode_if_needed(sctx, 1);

out:
	btrfs_free_path(path);
	return ret;
}

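/*
 * Send a whole subvolume: optionally emit the stream header, send the
 * subvolume begin command, then either diff against the parent root or do a
 * full send of the tree.
 */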
static int send_subvol(struct send_ctx *sctx)
{
	int ret;

	if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_STREAM_HEADER)) {
		ret = send_header(sctx);
		if (ret < 0)
			goto out;
	}

	ret = send_subvol_begin(sctx);
	if (ret < 0)
		goto out;

	if (sctx->parent_root) {
		ret = btrfs_compare_trees(sctx->send_root, sctx->parent_root,
				changed_cb, sctx);
		if (ret < 0)
			goto out;
		ret = finish_inode_if_needed(sctx, 1);
		if (ret < 0)
			goto out;
	} else {
		ret = full_send_tree(sctx);
		if (ret < 0)
			goto out;
	}

out:
	free_recorded_refs(sctx);
	return ret;
}

/*
 * If orphan cleanup did remove any orphans from a root, it means the tree
 * was modified and therefore the commit root is not the same as the current
 * root anymore. This is a problem, because send uses the commit root and
 * therefore can see inode items that don't exist in the current root anymore,
 * and for example make calls to btrfs_iget, which will do tree lookups based
 * on the current root and not on the commit root. Those lookups will fail,
 * returning a -ESTALE error, and making send fail with that error. So make
 * sure a send does not see any orphans we have just removed, and that it will
 * see the same inodes regardless of whether a transaction commit happened
 * before it started (meaning that the commit root will be the same as the
 * current root) or not.
 */
static int ensure_commit_roots_uptodate(struct send_ctx *sctx)
{
	int i;
	struct btrfs_trans_handle *trans = NULL;

again:
	if (sctx->parent_root &&
	    sctx->parent_root->node != sctx->parent_root->commit_root)
		goto commit_trans;

	for (i = 0; i < sctx->clone_roots_cnt; i++)
		if (sctx->clone_roots[i].root->node !=
		    sctx->clone_roots[i].root->commit_root)
			goto commit_trans;

	if (trans)
		return btrfs_end_transaction(trans);

	return 0;

commit_trans:
	/* Use any root, all fs roots will get their commit roots updated. */
	if (!trans) {
		trans = btrfs_join_transaction(sctx->send_root);
		if (IS_ERR(trans))
			return PTR_ERR(trans);
		goto again;
	}

	return btrfs_commit_transaction(trans);
}

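/* Drop the send_in_progress counter of @root, warning if it goes negative. */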
static void btrfs_root_dec_send_in_progress(struct btrfs_root *root)
{
	spin_lock(&root->root_item_lock);
	root->send_in_progress--;
	/*
	 * Not much left to do, we don't know why it's unbalanced and
	 * can't blindly reset it to 0.
	 */
	if (root->send_in_progress < 0)
		btrfs_err(root->fs_info,
			  "send_in_progress unbalanced %d root %llu",
			  root->send_in_progress, root->root_key.objectid);
	spin_unlock(&root->root_item_lock);
}

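/*
 * Entry point for the send ioctl: validate the arguments, set up the send
 * context and the clone/parent roots, generate the stream and clean up.
 */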
long btrfs_ioctl_send(struct file *mnt_file, struct btrfs_ioctl_send_args *arg)
{
	int ret = 0;
	struct btrfs_root *send_root = BTRFS_I(file_inode(mnt_file))->root;
	struct btrfs_fs_info *fs_info = send_root->fs_info;
	struct btrfs_root *clone_root;
	struct btrfs_key key;
	struct send_ctx *sctx = NULL;
	u32 i;
	u64 *clone_sources_tmp = NULL;
	int clone_sources_to_rollback = 0;
	unsigned alloc_size;
	int sort_clone_roots = 0;
	int index;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/*
	 * The subvolume must remain read-only during send, protect against
	 * making it RW. This also protects against deletion.
	 */
	spin_lock(&send_root->root_item_lock);
	send_root->send_in_progress++;
	spin_unlock(&send_root->root_item_lock);

	/*
	 * This is done when we lookup the root, it should already be complete
	 * by the time we get here.
	 */
	WARN_ON(send_root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE);

	/*
	 * Userspace tools do the checks and warn the user if it's
	 * not RO.
	 */
	if (!btrfs_root_readonly(send_root)) {
		ret = -EPERM;
		goto out;
	}

	/*
	 * Check that we don't overflow at later allocations, we request
	 * clone_sources_count + 1 items, and compare to unsigned long inside
	 * access_ok.
	 */
	if (arg->clone_sources_count >
	    ULONG_MAX / sizeof(struct clone_root) - 1) {
		ret = -EINVAL;
		goto out;
	}

	if (!access_ok(VERIFY_READ, arg->clone_sources,
			sizeof(*arg->clone_sources) *
			arg->clone_sources_count)) {
		ret = -EFAULT;
		goto out;
	}

	if (arg->flags & ~BTRFS_SEND_FLAG_MASK) {
		ret = -EINVAL;
		goto out;
	}

	sctx = kzalloc(sizeof(struct send_ctx), GFP_KERNEL);
	if (!sctx) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&sctx->new_refs);
	INIT_LIST_HEAD(&sctx->deleted_refs);
	INIT_RADIX_TREE(&sctx->name_cache, GFP_KERNEL);
	INIT_LIST_HEAD(&sctx->name_cache_list);

	sctx->flags = arg->flags;

	sctx->send_filp = fget(arg->send_fd);
	if (!sctx->send_filp) {
		ret = -EBADF;
		goto out;
	}

	sctx->send_root = send_root;
	/*
	 * Unlikely but possible, if the subvolume is marked for deletion but
	 * is slow to remove the directory entry, send can still be started
	 */
	if (btrfs_root_dead(sctx->send_root)) {
		ret = -EPERM;
		goto out;
	}

	sctx->clone_roots_cnt = arg->clone_sources_count;

	sctx->send_max_size = BTRFS_SEND_BUF_SIZE;
	sctx->send_buf = kvmalloc(sctx->send_max_size, GFP_KERNEL);
	if (!sctx->send_buf) {
		ret = -ENOMEM;
		goto out;
	}

	sctx->read_buf = kvmalloc(BTRFS_SEND_READ_SIZE, GFP_KERNEL);
	if (!sctx->read_buf) {
		ret = -ENOMEM;
		goto out;
	}

	sctx->pending_dir_moves = RB_ROOT;
	sctx->waiting_dir_moves = RB_ROOT;
	sctx->orphan_dirs = RB_ROOT;

	alloc_size = sizeof(struct clone_root) * (arg->clone_sources_count + 1);

	sctx->clone_roots = kzalloc(alloc_size, GFP_KERNEL);
	if (!sctx->clone_roots) {
		ret = -ENOMEM;
		goto out;
	}

	alloc_size = arg->clone_sources_count * sizeof(*arg->clone_sources);

	if (arg->clone_sources_count) {
		clone_sources_tmp = kvmalloc(alloc_size, GFP_KERNEL);
		if (!clone_sources_tmp) {
			ret = -ENOMEM;
			goto out;
		}

		ret = copy_from_user(clone_sources_tmp, arg->clone_sources,
				alloc_size);
		if (ret) {
			ret = -EFAULT;
			goto out;
		}

		for (i = 0; i < arg->clone_sources_count; i++) {
			key.objectid = clone_sources_tmp[i];
			key.type = BTRFS_ROOT_ITEM_KEY;
			key.offset = (u64)-1;

			index = srcu_read_lock(&fs_info->subvol_srcu);

			clone_root = btrfs_read_fs_root_no_name(fs_info, &key);
			if (IS_ERR(clone_root)) {
				srcu_read_unlock(&fs_info->subvol_srcu, index);
				ret = PTR_ERR(clone_root);
				goto out;
			}
			spin_lock(&clone_root->root_item_lock);
			if (!btrfs_root_readonly(clone_root) ||
			    btrfs_root_dead(clone_root)) {
				spin_unlock(&clone_root->root_item_lock);
				srcu_read_unlock(&fs_info->subvol_srcu, index);
				ret = -EPERM;
				goto out;
			}
			clone_root->send_in_progress++;
			spin_unlock(&clone_root->root_item_lock);
			srcu_read_unlock(&fs_info->subvol_srcu, index);

			sctx->clone_roots[i].root = clone_root;
			clone_sources_to_rollback = i + 1;
		}
		kvfree(clone_sources_tmp);
		clone_sources_tmp = NULL;
	}

	if (arg->parent_root) {
		key.objectid = arg->parent_root;
		key.type = BTRFS_ROOT_ITEM_KEY;
		key.offset = (u64)-1;

		index = srcu_read_lock(&fs_info->subvol_srcu);

		sctx->parent_root = btrfs_read_fs_root_no_name(fs_info, &key);
		if (IS_ERR(sctx->parent_root)) {
			srcu_read_unlock(&fs_info->subvol_srcu, index);
			ret = PTR_ERR(sctx->parent_root);
			goto out;
		}

		spin_lock(&sctx->parent_root->root_item_lock);
		sctx->parent_root->send_in_progress++;
		if (!btrfs_root_readonly(sctx->parent_root) ||
		    btrfs_root_dead(sctx->parent_root)) {
			spin_unlock(&sctx->parent_root->root_item_lock);
			srcu_read_unlock(&fs_info->subvol_srcu, index);
			ret = -EPERM;
			goto out;
		}
		spin_unlock(&sctx->parent_root->root_item_lock);

		srcu_read_unlock(&fs_info->subvol_srcu, index);
	}

	/*
	 * Clones from send_root are allowed, but only if the clone source
	 * is behind the current send position. This is checked while searching
	 * for possible clone sources.
	 */
	sctx->clone_roots[sctx->clone_roots_cnt++].root = sctx->send_root;

	/* We do a bsearch later */
	sort(sctx->clone_roots, sctx->clone_roots_cnt,
			sizeof(*sctx->clone_roots), __clone_root_cmp_sort,
			NULL);
	sort_clone_roots = 1;

	ret = ensure_commit_roots_uptodate(sctx);
	if (ret)
		goto out;

	current->journal_info = BTRFS_SEND_TRANS_STUB;
	ret = send_subvol(sctx);
	current->journal_info = NULL;
	if (ret < 0)
		goto out;

	if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_END_CMD)) {
		ret = begin_cmd(sctx, BTRFS_SEND_C_END);
		if (ret < 0)
			goto out;
		ret = send_cmd(sctx);
		if (ret < 0)
			goto out;
	}

out:
	WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->pending_dir_moves));
	while (sctx && !RB_EMPTY_ROOT(&sctx->pending_dir_moves)) {
		struct rb_node *n;
		struct pending_dir_move *pm;

		n = rb_first(&sctx->pending_dir_moves);
		pm = rb_entry(n, struct pending_dir_move, node);
		while (!list_empty(&pm->list)) {
			struct pending_dir_move *pm2;

			pm2 = list_first_entry(&pm->list,
					       struct pending_dir_move, list);
			free_pending_move(sctx, pm2);
		}
		free_pending_move(sctx, pm);
	}

	WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves));
	while (sctx && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves)) {
		struct rb_node *n;
		struct waiting_dir_move *dm;

		n = rb_first(&sctx->waiting_dir_moves);
		dm = rb_entry(n, struct waiting_dir_move, node);
		rb_erase(&dm->node, &sctx->waiting_dir_moves);
		kfree(dm);
	}

	WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->orphan_dirs));
	while (sctx && !RB_EMPTY_ROOT(&sctx->orphan_dirs)) {
		struct rb_node *n;
		struct orphan_dir_info *odi;

		n = rb_first(&sctx->orphan_dirs);
		odi = rb_entry(n, struct orphan_dir_info, node);
		free_orphan_dir_info(sctx, odi);
	}

	if (sort_clone_roots) {
		for (i = 0; i < sctx->clone_roots_cnt; i++)
			btrfs_root_dec_send_in_progress(
					sctx->clone_roots[i].root);
	} else {
		for (i = 0; sctx && i < clone_sources_to_rollback; i++)
			btrfs_root_dec_send_in_progress(
					sctx->clone_roots[i].root);

		btrfs_root_dec_send_in_progress(send_root);
	}
	if (sctx && !IS_ERR_OR_NULL(sctx->parent_root))
		btrfs_root_dec_send_in_progress(sctx->parent_root);

	kvfree(clone_sources_tmp);

	if (sctx->send_filp)
		fput(sctx->send_filp);

	kvfree(sctx->clone_roots);
	kvfree(sctx->send_buf);
	kvfree(sctx->read_buf);

	name_cache_free(sctx);