// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 Alexander Block. All rights reserved.
 */

#include <linux/bsearch.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/sort.h>
#include <linux/mount.h>
#include <linux/xattr.h>
#include <linux/posix_acl_xattr.h>
#include <linux/radix-tree.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/compat.h>
#include <linux/crc32c.h>

#include "send.h"
#include "backref.h"
#include "locking.h"
#include "disk-io.h"
#include "btrfs_inode.h"
#include "transaction.h"
#include "compression.h"
#include "xattr.h"

/*
 * Maximum number of references an extent can have in order for us to attempt to
 * issue clone operations instead of write operations. This currently exists to
 * avoid hitting limitations of the backreference walking code (taking a lot of
 * time and using too much memory for extents with large number of references).
 */
#define SEND_MAX_EXTENT_REFS	64

/*
 * A fs_path is a helper to dynamically build path names with unknown size.
 * It reallocates the internal buffer on demand.
 * It allows fast adding of path elements on the right side (normal path) and
 * fast adding to the left side (reversed path). A reversed path can also be
 * unreversed if needed.
 */
struct fs_path {
	union {
		struct {
			char *start;
			char *end;

			char *buf;
			unsigned short buf_len:15;
			unsigned short reversed:1;
			char inline_buf[];
		};
		/*
		 * Average path length does not exceed 200 bytes, we'll have
		 * better packing in the slab and higher chance to satisfy
		 * an allocation later during send.
		 */
		char pad[256];
	};
};
#define FS_PATH_INLINE_SIZE \
	(sizeof(struct fs_path) - offsetof(struct fs_path, inline_buf))
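
/*
 * Rough sizing sketch (illustrative, assuming a typical 64-bit build):
 * inline_buf starts after three pointers (24 bytes) plus one unsigned short
 * shared by the buf_len/reversed bitfields, i.e. at offset 26, while the
 * union with pad[256] forces sizeof(struct fs_path) up to 256. So
 * FS_PATH_INLINE_SIZE comes out around 256 - 26 = 230 bytes of inline
 * storage, and paths up to that length never touch the allocator.
 */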

/* reused for each extent */
struct clone_root {
	struct btrfs_root *root;
	u64 ino;
	u64 offset;

	u64 found_refs;
};

#define SEND_CTX_MAX_NAME_CACHE_SIZE 128
#define SEND_CTX_NAME_CACHE_CLEAN_SIZE (SEND_CTX_MAX_NAME_CACHE_SIZE * 2)

struct send_ctx {
	struct file *send_filp;
	loff_t send_off;
	char *send_buf;
	u32 send_size;
	u32 send_max_size;
	u64 total_send_size;
	u64 cmd_send_size[BTRFS_SEND_C_MAX + 1];
	u64 flags;	/* 'flags' member of btrfs_ioctl_send_args is u64 */

	struct btrfs_root *send_root;
	struct btrfs_root *parent_root;
	struct clone_root *clone_roots;
	int clone_roots_cnt;

	/* current state of the compare_tree call */
	struct btrfs_path *left_path;
	struct btrfs_path *right_path;
	struct btrfs_key *cmp_key;

	/*
	 * info of the currently processed inode. In case of deleted inodes,
	 * these are the values from the deleted inode.
	 */
	u64 cur_ino;
	u64 cur_inode_gen;
	int cur_inode_new;
	int cur_inode_new_gen;
	int cur_inode_deleted;
	u64 cur_inode_size;
	u64 cur_inode_mode;
	u64 cur_inode_rdev;
	u64 cur_inode_last_extent;
	u64 cur_inode_next_write_offset;
	bool ignore_cur_inode;

	u64 send_progress;

	struct list_head new_refs;
	struct list_head deleted_refs;

	struct radix_tree_root name_cache;
	struct list_head name_cache_list;
	int name_cache_size;

	struct file_ra_state ra;

	char *read_buf;

	/*
	 * We process inodes by their increasing order, so if before an
	 * incremental send we reverse the parent/child relationship of
	 * directories such that a directory with a lower inode number was
	 * the parent of a directory with a higher inode number, and the one
	 * becoming the new parent got renamed too, we can't rename/move the
	 * directory with lower inode number when we finish processing it - we
	 * must process the directory with higher inode number first, then
	 * rename/move it and then rename/move the directory with lower inode
	 * number. Example follows.
	 *
	 * Tree state when the first send was performed:
	 *
	 * .
	 * |-- a                   (ino 257)
	 *     |-- b               (ino 258)
	 *         |
	 *         |-- c           (ino 259)
	 *         |   |-- d       (ino 260)
	 *         |
	 *         |-- c2          (ino 261)
	 *
	 * Tree state when the second (incremental) send is performed:
	 *
	 * .
	 * |-- a                   (ino 257)
	 *     |-- b               (ino 258)
	 *         |-- c2          (ino 261)
	 *             |-- d2      (ino 260)
	 *                 |-- cc  (ino 259)
	 *
	 * The sequence of steps that lead to the second state was:
	 *
	 * mv /a/b/c/d /a/b/c2/d2
	 * mv /a/b/c /a/b/c2/d2/cc
	 *
	 * "c" has lower inode number, but we can't move it (2nd mv operation)
	 * before we move "d", which has higher inode number.
	 *
	 * So we just memorize which move/rename operations must be performed
	 * later when their respective parent is processed and moved/renamed.
	 */

	/* Indexed by parent directory inode number. */
	struct rb_root pending_dir_moves;

	/*
	 * Reverse index, indexed by the inode number of a directory that
	 * is waiting for the move/rename of its immediate parent before its
	 * own move/rename can be performed.
	 */
	struct rb_root waiting_dir_moves;

	/*
	 * A directory that is going to be rm'ed might have a child directory
	 * which is in the pending directory moves index above. In this case,
	 * the directory can only be removed after the move/rename of its child
	 * is performed. Example:
	 *
	 * Parent snapshot:
	 *
	 * .                        (ino 256)
	 * |-- a/                   (ino 257)
	 *     |-- b/               (ino 258)
	 *         |-- c/           (ino 259)
	 *         |   |-- x/       (ino 260)
	 *         |
	 *         |-- y/           (ino 261)
	 *
	 * Send snapshot:
	 *
	 * .                        (ino 256)
	 * |-- a/                   (ino 257)
	 *     |-- b/               (ino 258)
	 *         |-- YY/          (ino 261)
	 *              |-- x/      (ino 260)
	 *
	 * Sequence of steps that lead to the send snapshot:
	 * rm -f /a/b/c/foo.txt
	 * mv /a/b/y /a/b/YY
	 * mv /a/b/c/x /a/b/YY
	 * rmdir /a/b/c
	 *
	 * When the child is processed, its move/rename is delayed until its
	 * parent is processed (as explained above), but all other operations
	 * like update utimes, chown, chgrp, etc, are performed and the paths
	 * that it uses for those operations must use the orphanized name of
	 * its parent (the directory we're going to rm later), so we need to
	 * memorize that name.
	 *
	 * Indexed by the inode number of the directory to be deleted.
	 */
	struct rb_root orphan_dirs;
};

struct pending_dir_move {
	struct rb_node node;
	struct list_head list;
	u64 parent_ino;
	u64 ino;
	u64 gen;
	struct list_head update_refs;
};

struct waiting_dir_move {
	struct rb_node node;
	u64 ino;
	/*
	 * There might be some directory that could not be removed because it
	 * was waiting for this directory inode to be moved first. Therefore
	 * after this directory is moved, we can try to rmdir the ino rmdir_ino.
	 */
	u64 rmdir_ino;
	bool orphanized;
};

struct orphan_dir_info {
	struct rb_node node;
	u64 ino;
	u64 gen;
	u64 last_dir_index_offset;
};

struct name_cache_entry {
	struct list_head list;
	/*
	 * radix_tree has only 32bit entries but we need to handle 64bit inums.
	 * We use the lower 32bit of the 64bit inum to store it in the tree. If
	 * more than one inum would fall into the same entry, we use radix_list
	 * to store the additional entries. radix_list is also used to store
	 * entries where two entries have the same inum but different
	 * generations.
	 */
	struct list_head radix_list;
	u64 ino;
	u64 gen;
	u64 parent_ino;
	u64 parent_gen;
	int ret;
	int need_later_update;
	int name_len;
	char name[];
};

#define ADVANCE			1
#define ADVANCE_ONLY_NEXT	-1

enum btrfs_compare_tree_result {
	BTRFS_COMPARE_TREE_NEW,
	BTRFS_COMPARE_TREE_DELETED,
	BTRFS_COMPARE_TREE_CHANGED,
	BTRFS_COMPARE_TREE_SAME,
};
typedef int (*btrfs_changed_cb_t)(struct btrfs_path *left_path,
				  struct btrfs_path *right_path,
				  struct btrfs_key *key,
				  enum btrfs_compare_tree_result result,
				  void *ctx);

__cold
static void inconsistent_snapshot_error(struct send_ctx *sctx,
					enum btrfs_compare_tree_result result,
					const char *what)
{
	const char *result_string;

	switch (result) {
	case BTRFS_COMPARE_TREE_NEW:
		result_string = "new";
		break;
	case BTRFS_COMPARE_TREE_DELETED:
		result_string = "deleted";
		break;
	case BTRFS_COMPARE_TREE_CHANGED:
		result_string = "updated";
		break;
	case BTRFS_COMPARE_TREE_SAME:
		ASSERT(0);
		result_string = "unchanged";
		break;
	default:
		ASSERT(0);
		result_string = "unexpected";
	}

	btrfs_err(sctx->send_root->fs_info,
		  "Send: inconsistent snapshot, found %s %s for inode %llu without updated inode item, send root is %llu, parent root is %llu",
		  result_string, what, sctx->cmp_key->objectid,
		  sctx->send_root->root_key.objectid,
		  (sctx->parent_root ?
		   sctx->parent_root->root_key.objectid : 0));
}

static int is_waiting_for_move(struct send_ctx *sctx, u64 ino);

static struct waiting_dir_move *
get_waiting_dir_move(struct send_ctx *sctx, u64 ino);

static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino);

static int need_send_hole(struct send_ctx *sctx)
{
	return (sctx->parent_root && !sctx->cur_inode_new &&
		!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted &&
		S_ISREG(sctx->cur_inode_mode));
}

static void fs_path_reset(struct fs_path *p)
{
	if (p->reversed) {
		p->start = p->buf + p->buf_len - 1;
		p->end = p->start;
		*p->start = 0;
	} else {
		p->start = p->buf;
		p->end = p->start;
		*p->start = 0;
	}
}

static struct fs_path *fs_path_alloc(void)
{
	struct fs_path *p;

	p = kmalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return NULL;
	p->reversed = 0;
	p->buf = p->inline_buf;
	p->buf_len = FS_PATH_INLINE_SIZE;
	fs_path_reset(p);
	return p;
}

static struct fs_path *fs_path_alloc_reversed(void)
{
	struct fs_path *p;

	p = fs_path_alloc();
	if (!p)
		return NULL;
	p->reversed = 1;
	fs_path_reset(p);
	return p;
}

static void fs_path_free(struct fs_path *p)
{
	if (!p)
		return;
	if (p->buf != p->inline_buf)
		kfree(p->buf);
	kfree(p);
}

static int fs_path_len(struct fs_path *p)
{
	return p->end - p->start;
}

static int fs_path_ensure_buf(struct fs_path *p, int len)
{
	char *tmp_buf;
	int path_len;
	int old_buf_len;

	len++;

	if (p->buf_len >= len)
		return 0;

	if (len > PATH_MAX) {
		WARN_ON(1);
		return -ENOMEM;
	}

	path_len = p->end - p->start;
	old_buf_len = p->buf_len;

	/*
	 * First time the inline_buf does not suffice
	 */
	if (p->buf == p->inline_buf) {
		tmp_buf = kmalloc(len, GFP_KERNEL);
		if (tmp_buf)
			memcpy(tmp_buf, p->buf, old_buf_len);
	} else {
		tmp_buf = krealloc(p->buf, len, GFP_KERNEL);
	}
	if (!tmp_buf)
		return -ENOMEM;
	p->buf = tmp_buf;
	/*
	 * The real size of the buffer is bigger, this will let the fast path
	 * happen most of the time
	 */
	p->buf_len = ksize(p->buf);

	if (p->reversed) {
		tmp_buf = p->buf + old_buf_len - path_len - 1;
		p->end = p->buf + p->buf_len - 1;
		p->start = p->end - path_len;
		memmove(p->start, tmp_buf, path_len + 1);
	} else {
		p->start = p->buf;
		p->end = p->start + path_len;
	}
	return 0;
}

static int fs_path_prepare_for_add(struct fs_path *p, int name_len,
				   char **prepared)
{
	int ret;
	int new_len;

	new_len = p->end - p->start + name_len;
	if (p->start != p->end)
		new_len++;
	ret = fs_path_ensure_buf(p, new_len);
	if (ret < 0)
		goto out;

	if (p->reversed) {
		if (p->start != p->end)
			*--p->start = '/';
		p->start -= name_len;
		*prepared = p->start;
	} else {
		if (p->start != p->end)
			*p->end++ = '/';
		*prepared = p->end;
		p->end += name_len;
		*p->end = 0;
	}

out:
	return ret;
}

static int fs_path_add(struct fs_path *p, const char *name, int name_len)
{
	int ret;
	char *prepared;

	ret = fs_path_prepare_for_add(p, name_len, &prepared);
	if (ret < 0)
		goto out;
	memcpy(prepared, name, name_len);

out:
	return ret;
}
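
/*
 * Usage sketch for the helpers above (illustrative only, not called by this
 * file): building "a/b" left to right with a normal fs_path.
 *
 *	struct fs_path *p = fs_path_alloc();
 *
 *	if (p) {
 *		fs_path_add(p, "a", 1);		p->start is now "a"
 *		fs_path_add(p, "b", 1);		p->start is now "a/b"
 *		fs_path_free(p);
 *	}
 *
 * fs_path_prepare_for_add() inserts the '/' separator automatically for
 * every element after the first.
 */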

static int fs_path_add_path(struct fs_path *p, struct fs_path *p2)
{
	int ret;
	char *prepared;

	ret = fs_path_prepare_for_add(p, p2->end - p2->start, &prepared);
	if (ret < 0)
		goto out;
	memcpy(prepared, p2->start, p2->end - p2->start);

out:
	return ret;
}

static int fs_path_add_from_extent_buffer(struct fs_path *p,
					  struct extent_buffer *eb,
					  unsigned long off, int len)
{
	int ret;
	char *prepared;

	ret = fs_path_prepare_for_add(p, len, &prepared);
	if (ret < 0)
		goto out;

	read_extent_buffer(eb, prepared, off, len);

out:
	return ret;
}

static int fs_path_copy(struct fs_path *p, struct fs_path *from)
{
	int ret;

	p->reversed = from->reversed;
	fs_path_reset(p);

	ret = fs_path_add_path(p, from);

	return ret;
}

static void fs_path_unreverse(struct fs_path *p)
{
	char *tmp;
	int len;

	if (!p->reversed)
		return;

	tmp = p->start;
	len = p->end - p->start;
	p->start = p->buf;
	p->end = p->start + len;
	memmove(p->start, tmp, len + 1);
	p->reversed = 0;
}
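
/*
 * Reversed paths are built the other way around (again an illustrative
 * sketch, not part of the original flow): get_cur_path() walks from an
 * inode up to the subvolume root, so components arrive in child -> parent
 * order and are prepended at the tail of the buffer.
 *
 *	struct fs_path *p = fs_path_alloc_reversed();
 *
 *	if (p) {
 *		fs_path_add(p, "c", 1);		buffer tail holds "c"
 *		fs_path_add(p, "b", 1);		buffer tail holds "b/c"
 *		fs_path_add(p, "a", 1);		buffer tail holds "a/b/c"
 *		fs_path_unreverse(p);		moves "a/b/c" to the front
 *		fs_path_free(p);
 *	}
 */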

static struct btrfs_path *alloc_path_for_send(void)
{
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return NULL;
	path->search_commit_root = 1;
	path->skip_locking = 1;
	path->need_commit_sem = 1;
	return path;
}

static int write_buf(struct file *filp, const void *buf, u32 len, loff_t *off)
{
	int ret;
	u32 pos = 0;

	while (pos < len) {
		ret = kernel_write(filp, buf + pos, len - pos, off);
		/* TODO handle that correctly */
		/*if (ret == -ERESTARTSYS) {
			continue;
		}*/
		if (ret < 0)
			return ret;
		if (ret == 0) {
			return -EIO;
		}
		pos += ret;
	}

	return 0;
}

static int tlv_put(struct send_ctx *sctx, u16 attr, const void *data, int len)
{
	struct btrfs_tlv_header *hdr;
	int total_len = sizeof(*hdr) + len;
	int left = sctx->send_max_size - sctx->send_size;

	if (unlikely(left < total_len))
		return -EOVERFLOW;

	hdr = (struct btrfs_tlv_header *) (sctx->send_buf + sctx->send_size);
	hdr->tlv_type = cpu_to_le16(attr);
	hdr->tlv_len = cpu_to_le16(len);
	memcpy(hdr + 1, data, len);
	sctx->send_size += total_len;

	return 0;
}
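
/*
 * Resulting send buffer layout after a tlv_put() call, as implied by the
 * code above (sketch): a 4 byte little-endian header immediately followed
 * by the raw attribute payload.
 *
 *	offset 0: __le16 tlv_type	(a BTRFS_SEND_A_* attribute)
 *	offset 2: __le16 tlv_len	(payload length, header excluded)
 *	offset 4: len bytes of data
 *
 * A command is then just a btrfs_cmd_header followed by a sequence of such
 * TLVs, see begin_cmd()/send_cmd() below.
 */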

#define TLV_PUT_DEFINE_INT(bits) \
	static int tlv_put_u##bits(struct send_ctx *sctx,		\
				   u##bits attr, u##bits value)		\
	{								\
		__le##bits __tmp = cpu_to_le##bits(value);		\
		return tlv_put(sctx, attr, &__tmp, sizeof(__tmp));	\
	}

TLV_PUT_DEFINE_INT(64)

static int tlv_put_string(struct send_ctx *sctx, u16 attr,
			  const char *str, int len)
{
	if (len == -1)
		len = strlen(str);
	return tlv_put(sctx, attr, str, len);
}

static int tlv_put_uuid(struct send_ctx *sctx, u16 attr,
			const u8 *uuid)
{
	return tlv_put(sctx, attr, uuid, BTRFS_UUID_SIZE);
}

static int tlv_put_btrfs_timespec(struct send_ctx *sctx, u16 attr,
				  struct extent_buffer *eb,
				  struct btrfs_timespec *ts)
{
	struct btrfs_timespec bts;
	read_extent_buffer(eb, &bts, (unsigned long)ts, sizeof(bts));
	return tlv_put(sctx, attr, &bts, sizeof(bts));
}

#define TLV_PUT(sctx, attrtype, data, attrlen) \
	do { \
		ret = tlv_put(sctx, attrtype, data, attrlen); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)

#define TLV_PUT_INT(sctx, attrtype, bits, value) \
	do { \
		ret = tlv_put_u##bits(sctx, attrtype, value); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)

#define TLV_PUT_U8(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 8, data)
#define TLV_PUT_U16(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 16, data)
#define TLV_PUT_U32(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 32, data)
#define TLV_PUT_U64(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 64, data)
#define TLV_PUT_STRING(sctx, attrtype, str, len) \
	do { \
		ret = tlv_put_string(sctx, attrtype, str, len); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
#define TLV_PUT_PATH(sctx, attrtype, p) \
	do { \
		ret = tlv_put_string(sctx, attrtype, p->start, \
				     p->end - p->start); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
#define TLV_PUT_UUID(sctx, attrtype, uuid) \
	do { \
		ret = tlv_put_uuid(sctx, attrtype, uuid); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
#define TLV_PUT_BTRFS_TIMESPEC(sctx, attrtype, eb, ts) \
	do { \
		ret = tlv_put_btrfs_timespec(sctx, attrtype, eb, ts); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)

static int send_header(struct send_ctx *sctx)
{
	struct btrfs_stream_header hdr;

	strcpy(hdr.magic, BTRFS_SEND_STREAM_MAGIC);
	hdr.version = cpu_to_le32(BTRFS_SEND_STREAM_VERSION);

	return write_buf(sctx->send_filp, &hdr, sizeof(hdr),
					&sctx->send_off);
}
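
/*
 * On the wire (sketch, per the definitions in send.h): the stream starts
 * with the NUL-terminated magic "btrfs-stream" followed by a __le32
 * version, and everything after this header is a sequence of commands.
 */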

/*
 * For each command/item we want to send to userspace, we call this function.
 */
static int begin_cmd(struct send_ctx *sctx, int cmd)
{
	struct btrfs_cmd_header *hdr;

	if (WARN_ON(!sctx->send_buf))
		return -EINVAL;

	BUG_ON(sctx->send_size);

	sctx->send_size += sizeof(*hdr);
	hdr = (struct btrfs_cmd_header *)sctx->send_buf;
	hdr->cmd = cpu_to_le16(cmd);

	return 0;
}

static int send_cmd(struct send_ctx *sctx)
{
	int ret;
	struct btrfs_cmd_header *hdr;
	u32 crc;

	hdr = (struct btrfs_cmd_header *)sctx->send_buf;
	hdr->len = cpu_to_le32(sctx->send_size - sizeof(*hdr));
	hdr->crc = 0;

	crc = btrfs_crc32c(0, (unsigned char *)sctx->send_buf, sctx->send_size);
	hdr->crc = cpu_to_le32(crc);

	ret = write_buf(sctx->send_filp, sctx->send_buf, sctx->send_size,
					&sctx->send_off);

	sctx->total_send_size += sctx->send_size;
	sctx->cmd_send_size[le16_to_cpu(hdr->cmd)] += sctx->send_size;
	sctx->send_size = 0;

	return ret;
}
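
/*
 * Command framing recap (sketch): each command is a btrfs_cmd_header
 * (__le32 len of the TLV payload, __le16 cmd, __le32 crc) followed by the
 * TLVs accumulated since begin_cmd(). The crc32c covers the whole command
 * with the crc field treated as zero, which is why send_cmd() clears
 * hdr->crc before computing the checksum and only then stores the result.
 */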

/*
 * Sends a move instruction to user space
 */
static int send_rename(struct send_ctx *sctx,
		     struct fs_path *from, struct fs_path *to)
{
	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
	int ret;

	btrfs_debug(fs_info, "send_rename %s -> %s", from->start, to->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_RENAME);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, from);
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_TO, to);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}

/*
 * Sends a link instruction to user space
 */
static int send_link(struct send_ctx *sctx,
		     struct fs_path *path, struct fs_path *lnk)
{
	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
	int ret;

	btrfs_debug(fs_info, "send_link %s -> %s", path->start, lnk->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_LINK);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, lnk);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}

/*
 * Sends an unlink instruction to user space
 */
static int send_unlink(struct send_ctx *sctx, struct fs_path *path)
{
	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
	int ret;

	btrfs_debug(fs_info, "send_unlink %s", path->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_UNLINK);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}

/*
 * Sends a rmdir instruction to user space
 */
static int send_rmdir(struct send_ctx *sctx, struct fs_path *path)
{
	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
	int ret;

	btrfs_debug(fs_info, "send_rmdir %s", path->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_RMDIR);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}

/*
 * Helper function to retrieve some fields from an inode item.
 */
static int __get_inode_info(struct btrfs_root *root, struct btrfs_path *path,
			  u64 ino, u64 *size, u64 *gen, u64 *mode, u64 *uid,
			  u64 *gid, u64 *rdev)
{
	int ret;
	struct btrfs_inode_item *ii;
	struct btrfs_key key;

	key.objectid = ino;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		return ret;
	}

	ii = btrfs_item_ptr(path->nodes[0], path->slots[0],
			struct btrfs_inode_item);
	if (size)
		*size = btrfs_inode_size(path->nodes[0], ii);
	if (gen)
		*gen = btrfs_inode_generation(path->nodes[0], ii);
	if (mode)
		*mode = btrfs_inode_mode(path->nodes[0], ii);
	if (uid)
		*uid = btrfs_inode_uid(path->nodes[0], ii);
	if (gid)
		*gid = btrfs_inode_gid(path->nodes[0], ii);
	if (rdev)
		*rdev = btrfs_inode_rdev(path->nodes[0], ii);

	return ret;
}

static int get_inode_info(struct btrfs_root *root,
			  u64 ino, u64 *size, u64 *gen,
			  u64 *mode, u64 *uid, u64 *gid,
			  u64 *rdev)
{
	struct btrfs_path *path;
	int ret;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;
	ret = __get_inode_info(root, path, ino, size, gen, mode, uid, gid,
			       rdev);
	btrfs_free_path(path);
	return ret;
}
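
/*
 * A small usage sketch (illustrative, mirrors the callers further down):
 * every out parameter is optional, so a caller interested only in the
 * generation passes NULL for the rest:
 *
 *	u64 gen;
 *	int ret = get_inode_info(root, ino, NULL, &gen, NULL, NULL,
 *				 NULL, NULL);
 *
 * get_cur_inode_state() and will_overwrite_ref() use exactly this pattern.
 */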

typedef int (*iterate_inode_ref_t)(int num, u64 dir, int index,
				   struct fs_path *p,
				   void *ctx);

/*
 * Helper function to iterate the entries in ONE btrfs_inode_ref or
 * btrfs_inode_extref.
 * The iterate callback may return a non zero value to stop iteration. This can
 * be a negative value for error codes or 1 to simply stop it.
 *
 * path must point to the INODE_REF or INODE_EXTREF when called.
 */
static int iterate_inode_ref(struct btrfs_root *root, struct btrfs_path *path,
			     struct btrfs_key *found_key, int resolve,
			     iterate_inode_ref_t iterate, void *ctx)
{
	struct extent_buffer *eb = path->nodes[0];
	struct btrfs_item *item;
	struct btrfs_inode_ref *iref;
	struct btrfs_inode_extref *extref;
	struct btrfs_path *tmp_path;
	struct fs_path *p;
	u32 cur = 0;
	u32 total;
	int slot = path->slots[0];
	u32 name_len;
	char *start;
	int ret = 0;
	int num = 0;
	int index;
	u64 dir;
	unsigned long name_off;
	unsigned long elem_size;
	unsigned long ptr;

	p = fs_path_alloc_reversed();
	if (!p)
		return -ENOMEM;

	tmp_path = alloc_path_for_send();
	if (!tmp_path) {
		fs_path_free(p);
		return -ENOMEM;
	}

	if (found_key->type == BTRFS_INODE_REF_KEY) {
		ptr = (unsigned long)btrfs_item_ptr(eb, slot,
						    struct btrfs_inode_ref);
		item = btrfs_item_nr(slot);
		total = btrfs_item_size(eb, item);
		elem_size = sizeof(*iref);
	} else {
		ptr = btrfs_item_ptr_offset(eb, slot);
		total = btrfs_item_size_nr(eb, slot);
		elem_size = sizeof(*extref);
	}

	while (cur < total) {
		fs_path_reset(p);

		if (found_key->type == BTRFS_INODE_REF_KEY) {
			iref = (struct btrfs_inode_ref *)(ptr + cur);
			name_len = btrfs_inode_ref_name_len(eb, iref);
			name_off = (unsigned long)(iref + 1);
			index = btrfs_inode_ref_index(eb, iref);
			dir = found_key->offset;
		} else {
			extref = (struct btrfs_inode_extref *)(ptr + cur);
			name_len = btrfs_inode_extref_name_len(eb, extref);
			name_off = (unsigned long)&extref->name;
			index = btrfs_inode_extref_index(eb, extref);
			dir = btrfs_inode_extref_parent(eb, extref);
		}

		if (resolve) {
			start = btrfs_ref_to_path(root, tmp_path, name_len,
						  name_off, eb, dir,
						  p->buf, p->buf_len);
			if (IS_ERR(start)) {
				ret = PTR_ERR(start);
				goto out;
			}
			if (start < p->buf) {
				/* overflow , try again with larger buffer */
				ret = fs_path_ensure_buf(p,
						p->buf_len + p->buf - start);
				if (ret < 0)
					goto out;
				start = btrfs_ref_to_path(root, tmp_path,
							  name_len, name_off,
							  eb, dir,
							  p->buf, p->buf_len);
				if (IS_ERR(start)) {
					ret = PTR_ERR(start);
					goto out;
				}
				BUG_ON(start < p->buf);
			}
			p->start = start;
		} else {
			ret = fs_path_add_from_extent_buffer(p, eb, name_off,
							     name_len);
			if (ret < 0)
				goto out;
		}

		cur += elem_size + name_len;
		ret = iterate(num, dir, index, p, ctx);
		if (ret)
			goto out;
		num++;
	}

out:
	btrfs_free_path(tmp_path);
	fs_path_free(p);
	return ret;
}
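
/*
 * Item layouts being walked above (sketch, per the on-disk format headers):
 *
 *	INODE_REF item (key offset = parent dir inode):
 *		struct btrfs_inode_ref { __le64 index; __le16 name_len; }
 *		followed by name_len bytes of name, repeated until the end
 *		of the item.
 *	INODE_EXTREF item:
 *		struct btrfs_inode_extref { __le64 parent_objectid;
 *		__le64 index; __le16 name_len; __u8 name[]; }, also repeated.
 *
 * That is why the loop advances cur by elem_size + name_len on every
 * iteration.
 */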

typedef int (*iterate_dir_item_t)(int num, struct btrfs_key *di_key,
				  const char *name, int name_len,
				  const char *data, int data_len,
				  u8 type, void *ctx);

/*
 * Helper function to iterate the entries in ONE btrfs_dir_item.
 * The iterate callback may return a non zero value to stop iteration. This can
 * be a negative value for error codes or 1 to simply stop it.
 *
 * path must point to the dir item when called.
 */
static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path,
			    iterate_dir_item_t iterate, void *ctx)
{
	int ret = 0;
	struct extent_buffer *eb;
	struct btrfs_item *item;
	struct btrfs_dir_item *di;
	struct btrfs_key di_key;
	char *buf = NULL;
	int buf_len;
	u32 name_len;
	u32 data_len;
	u32 cur;
	u32 len;
	u32 total;
	int slot;
	int num;
	u8 type;

	/*
	 * Start with a small buffer (1 page). If later we end up needing more
	 * space, which can happen for xattrs on a fs with a leaf size greater
	 * than the page size, attempt to increase the buffer. Typically xattr
	 * values are small.
	 */
	buf_len = PATH_MAX;
	buf = kmalloc(buf_len, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out;
	}

	eb = path->nodes[0];
	slot = path->slots[0];
	item = btrfs_item_nr(slot);
	di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
	cur = 0;
	len = 0;
	total = btrfs_item_size(eb, item);

	num = 0;
	while (cur < total) {
		name_len = btrfs_dir_name_len(eb, di);
		data_len = btrfs_dir_data_len(eb, di);
		type = btrfs_dir_type(eb, di);
		btrfs_dir_item_key_to_cpu(eb, di, &di_key);

		if (type == BTRFS_FT_XATTR) {
			if (name_len > XATTR_NAME_MAX) {
				ret = -ENAMETOOLONG;
				goto out;
			}
			if (name_len + data_len >
					BTRFS_MAX_XATTR_SIZE(root->fs_info)) {
				ret = -E2BIG;
				goto out;
			}
		} else {
			/*
			 * Path too long
			 */
			if (name_len + data_len > PATH_MAX) {
				ret = -ENAMETOOLONG;
				goto out;
			}
		}

		if (name_len + data_len > buf_len) {
			buf_len = name_len + data_len;
			if (is_vmalloc_addr(buf)) {
				vfree(buf);
				buf = NULL;
			} else {
				char *tmp = krealloc(buf, buf_len,
						GFP_KERNEL | __GFP_NOWARN);

				if (!tmp)
					kfree(buf);
				buf = tmp;
			}
			if (!buf) {
				buf = kvmalloc(buf_len, GFP_KERNEL);
				if (!buf) {
					ret = -ENOMEM;
					goto out;
				}
			}
		}

		read_extent_buffer(eb, buf, (unsigned long)(di + 1),
				name_len + data_len);

		len = sizeof(*di) + name_len + data_len;
		di = (struct btrfs_dir_item *)((char *)di + len);
		cur += len;

		ret = iterate(num, &di_key, buf, name_len, buf + name_len,
				data_len, type, ctx);
		if (ret < 0)
			goto out;
		if (ret) {
			ret = 0;
			goto out;
		}

		num++;
	}

out:
	kvfree(buf);
	return ret;
}

static int __copy_first_ref(int num, u64 dir, int index,
			    struct fs_path *p, void *ctx)
{
	int ret;
	struct fs_path *pt = ctx;

	ret = fs_path_copy(pt, p);
	if (ret < 0)
		return ret;

	/* we want the first only */
	return 1;
}

/*
 * Retrieve the first path of an inode. If an inode has more than one
 * ref/hardlink, this is ignored.
 */
static int get_inode_path(struct btrfs_root *root,
			  u64 ino, struct fs_path *path)
{
	int ret;
	struct btrfs_key key, found_key;
	struct btrfs_path *p;

	p = alloc_path_for_send();
	if (!p)
		return -ENOMEM;

	fs_path_reset(path);

	key.objectid = ino;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = 0;

	ret = btrfs_search_slot_for_read(root, &key, p, 1, 0);
	if (ret < 0)
		goto out;
	if (ret) {
		ret = 1;
		goto out;
	}
	btrfs_item_key_to_cpu(p->nodes[0], &found_key, p->slots[0]);
	if (found_key.objectid != ino ||
	    (found_key.type != BTRFS_INODE_REF_KEY &&
	     found_key.type != BTRFS_INODE_EXTREF_KEY)) {
		ret = -ENOENT;
		goto out;
	}

	ret = iterate_inode_ref(root, p, &found_key, 1,
				__copy_first_ref, path);
	if (ret < 0)
		goto out;
	ret = 0;

out:
	btrfs_free_path(p);
	return ret;
}

struct backref_ctx {
	struct send_ctx *sctx;

	/* number of total found references */
	u64 found;

	/*
	 * used for clones found in send_root. clones found behind cur_objectid
	 * and cur_offset are not considered as allowed clones.
	 */
	u64 cur_objectid;
	u64 cur_offset;

	/* may be truncated in case it's the last extent in a file */
	u64 extent_len;

	/* data offset in the file extent item */
	u64 data_offset;

	/* Just to check for bugs in backref resolving */
	int found_itself;
};

static int __clone_root_cmp_bsearch(const void *key, const void *elt)
{
	u64 root = (u64)(uintptr_t)key;
	struct clone_root *cr = (struct clone_root *)elt;

	if (root < cr->root->root_key.objectid)
		return -1;
	if (root > cr->root->root_key.objectid)
		return 1;
	return 0;
}

static int __clone_root_cmp_sort(const void *e1, const void *e2)
{
	struct clone_root *cr1 = (struct clone_root *)e1;
	struct clone_root *cr2 = (struct clone_root *)e2;

	if (cr1->root->root_key.objectid < cr2->root->root_key.objectid)
		return -1;
	if (cr1->root->root_key.objectid > cr2->root->root_key.objectid)
		return 1;
	return 0;
}

/*
 * Called for every backref that is found for the current extent.
 * Results are collected in sctx->clone_roots->ino/offset/found_refs
 */
static int __iterate_backrefs(u64 ino, u64 offset, u64 root, void *ctx_)
{
	struct backref_ctx *bctx = ctx_;
	struct clone_root *found;

	/* First check if the root is in the list of accepted clone sources */
	found = bsearch((void *)(uintptr_t)root, bctx->sctx->clone_roots,
			bctx->sctx->clone_roots_cnt,
			sizeof(struct clone_root),
			__clone_root_cmp_bsearch);
	if (!found)
		return 0;

	if (found->root == bctx->sctx->send_root &&
	    ino == bctx->cur_objectid &&
	    offset == bctx->cur_offset) {
		bctx->found_itself = 1;
	}

	/*
	 * Make sure we don't consider clones from send_root that are
	 * behind the current inode/offset.
	 */
	if (found->root == bctx->sctx->send_root) {
		/*
		 * If the source inode was not yet processed we can't issue a
		 * clone operation, as the source extent does not exist yet at
		 * the destination of the stream.
		 */
		if (ino > bctx->cur_objectid)
			return 0;
		/*
		 * We clone from the inode currently being sent as long as the
		 * source extent is already processed, otherwise we could try
		 * to clone from an extent that does not exist yet at the
		 * destination of the stream.
		 */
		if (ino == bctx->cur_objectid &&
		    offset + bctx->extent_len >
		    bctx->sctx->cur_inode_next_write_offset)
			return 0;
	}

	bctx->found++;
	found->found_refs++;
	if (ino < found->ino) {
		found->ino = ino;
		found->offset = offset;
	} else if (found->ino == ino) {
		/*
		 * same extent found more than once in the same file.
		 */
		if (found->offset > offset + bctx->extent_len)
			found->offset = offset;
	}

	return 0;
}

/*
 * Given an inode, offset and extent item, it finds a good clone for a clone
 * instruction. Returns -ENOENT when none could be found. The function makes
 * sure that the returned clone is usable at the point where sending is at the
 * moment. This means, that no clones are accepted which lie behind the current
 * inode+offset.
 *
 * path must point to the extent item when called.
 */
static int find_extent_clone(struct send_ctx *sctx,
			     struct btrfs_path *path,
			     u64 ino, u64 data_offset,
			     u64 ino_size,
			     struct clone_root **found)
{
	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
	int ret;
	int extent_type;
	u64 logical;
	u64 disk_byte;
	u64 num_bytes;
	u64 extent_item_pos;
	u64 flags = 0;
	struct btrfs_file_extent_item *fi;
	struct extent_buffer *eb = path->nodes[0];
	struct backref_ctx *backref_ctx = NULL;
	struct clone_root *cur_clone_root;
	struct btrfs_key found_key;
	struct btrfs_path *tmp_path;
	struct btrfs_extent_item *ei;
	int compressed;
	u32 i;

	tmp_path = alloc_path_for_send();
	if (!tmp_path)
		return -ENOMEM;

	/* We only use this path under the commit sem */
	tmp_path->need_commit_sem = 0;

	backref_ctx = kmalloc(sizeof(*backref_ctx), GFP_KERNEL);
	if (!backref_ctx) {
		ret = -ENOMEM;
		goto out;
	}

	if (data_offset >= ino_size) {
		/*
		 * There may be extents that lie behind the file's size.
		 * I at least had this in combination with snapshotting while
		 * writing large files.
		 */
		ret = 0;
		goto out;
	}

	fi = btrfs_item_ptr(eb, path->slots[0],
			struct btrfs_file_extent_item);
	extent_type = btrfs_file_extent_type(eb, fi);
	if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
		ret = -ENOENT;
		goto out;
	}
	compressed = btrfs_file_extent_compression(eb, fi);

	num_bytes = btrfs_file_extent_num_bytes(eb, fi);
	disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
	if (disk_byte == 0) {
		ret = -ENOENT;
		goto out;
	}
	logical = disk_byte + btrfs_file_extent_offset(eb, fi);

	down_read(&fs_info->commit_root_sem);
	ret = extent_from_logical(fs_info, disk_byte, tmp_path,
				  &found_key, &flags);
	up_read(&fs_info->commit_root_sem);

	if (ret < 0)
		goto out;
	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		ret = -EIO;
		goto out;
	}

	ei = btrfs_item_ptr(tmp_path->nodes[0], tmp_path->slots[0],
			    struct btrfs_extent_item);
	/*
	 * Backreference walking (iterate_extent_inodes() below) is currently
	 * too expensive when an extent has a large number of references, both
	 * in time spent and used memory. So for now just fallback to write
	 * operations instead of clone operations when an extent has more than
	 * a certain amount of references.
	 */
	if (btrfs_extent_refs(tmp_path->nodes[0], ei) > SEND_MAX_EXTENT_REFS) {
		ret = -ENOENT;
		goto out;
	}
	btrfs_release_path(tmp_path);

	/*
	 * Setup the clone roots.
	 */
	for (i = 0; i < sctx->clone_roots_cnt; i++) {
		cur_clone_root = sctx->clone_roots + i;
		cur_clone_root->ino = (u64)-1;
		cur_clone_root->offset = 0;
		cur_clone_root->found_refs = 0;
	}

	backref_ctx->sctx = sctx;
	backref_ctx->found = 0;
	backref_ctx->cur_objectid = ino;
	backref_ctx->cur_offset = data_offset;
	backref_ctx->found_itself = 0;
	backref_ctx->extent_len = num_bytes;
	/*
	 * For non-compressed extents iterate_extent_inodes() gives us extent
	 * offsets that already take into account the data offset, but not for
	 * compressed extents, since the offset is logical and not relative to
	 * the physical extent locations. We must take this into account to
	 * avoid sending clone offsets that go beyond the source file's size,
	 * which would result in the clone ioctl failing with -EINVAL on the
	 * receiving end.
	 */
	if (compressed == BTRFS_COMPRESS_NONE)
		backref_ctx->data_offset = 0;
	else
		backref_ctx->data_offset = btrfs_file_extent_offset(eb, fi);

	/*
	 * The last extent of a file may be too large due to page alignment.
	 * We need to adjust extent_len in this case so that the checks in
	 * __iterate_backrefs work.
	 */
	if (data_offset + num_bytes >= ino_size)
		backref_ctx->extent_len = ino_size - data_offset;

	/*
	 * Now collect all backrefs.
	 */
	if (compressed == BTRFS_COMPRESS_NONE)
		extent_item_pos = logical - found_key.objectid;
	else
		extent_item_pos = 0;
	ret = iterate_extent_inodes(fs_info, found_key.objectid,
				    extent_item_pos, 1, __iterate_backrefs,
				    backref_ctx, false);

	if (ret < 0)
		goto out;

	if (!backref_ctx->found_itself) {
		/* found a bug in backref code? */
		ret = -EIO;
		btrfs_err(fs_info,
			  "did not find backref in send_root. inode=%llu, offset=%llu, disk_byte=%llu found extent=%llu",
			  ino, data_offset, disk_byte, found_key.objectid);
		goto out;
	}

	btrfs_debug(fs_info,
		    "find_extent_clone: data_offset=%llu, ino=%llu, num_bytes=%llu, logical=%llu",
		    data_offset, ino, num_bytes, logical);

	if (!backref_ctx->found)
		btrfs_debug(fs_info, "no clones found");

	cur_clone_root = NULL;
	for (i = 0; i < sctx->clone_roots_cnt; i++) {
		if (sctx->clone_roots[i].found_refs) {
			if (!cur_clone_root)
				cur_clone_root = sctx->clone_roots + i;
			else if (sctx->clone_roots[i].root == sctx->send_root)
				/* prefer clones from send_root over others */
				cur_clone_root = sctx->clone_roots + i;
		}
	}

	if (cur_clone_root) {
		*found = cur_clone_root;
		ret = 0;
	} else {
		ret = -ENOENT;
	}

out:
	btrfs_free_path(tmp_path);
	kfree(backref_ctx);
	return ret;
}

static int read_symlink(struct btrfs_root *root,
			u64 ino,
			struct fs_path *dest)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_file_extent_item *ei;
	u8 type;
	u8 compression;
	unsigned long off;
	int len;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret) {
		/*
		 * An empty symlink inode. Can happen in rare error paths when
		 * creating a symlink (transaction committed before the inode
		 * eviction handler removed the symlink inode items and a crash
		 * happened in between or the subvol was snapshotted in between).
		 * Print an informative message to dmesg/syslog so that the user
		 * can delete the symlink.
		 */
		btrfs_err(root->fs_info,
			  "Found empty symlink inode %llu at root %llu",
			  ino, root->root_key.objectid);
		ret = -EIO;
		goto out;
	}

	ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
			struct btrfs_file_extent_item);
	type = btrfs_file_extent_type(path->nodes[0], ei);
	compression = btrfs_file_extent_compression(path->nodes[0], ei);
	BUG_ON(type != BTRFS_FILE_EXTENT_INLINE);
	BUG_ON(compression);

	off = btrfs_file_extent_inline_start(ei);
	len = btrfs_file_extent_ram_bytes(path->nodes[0], ei);

	ret = fs_path_add_from_extent_buffer(dest, path->nodes[0], off, len);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Helper function to generate a file name that is unique in the root of
 * send_root and parent_root. This is used to generate names for orphan inodes.
 */
static int gen_unique_name(struct send_ctx *sctx,
			   u64 ino, u64 gen,
			   struct fs_path *dest)
{
	int ret = 0;
	struct btrfs_path *path;
	struct btrfs_dir_item *di;
	char tmp[64];
	int len;
	u64 idx = 0;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	while (1) {
		len = snprintf(tmp, sizeof(tmp), "o%llu-%llu-%llu",
				ino, gen, idx);
		ASSERT(len < sizeof(tmp));

		di = btrfs_lookup_dir_item(NULL, sctx->send_root,
				path, BTRFS_FIRST_FREE_OBJECTID,
				tmp, strlen(tmp), 0);
		btrfs_release_path(path);
		if (IS_ERR(di)) {
			ret = PTR_ERR(di);
			goto out;
		}
		if (di) {
			/* not unique, try again */
			idx++;
			continue;
		}

		if (!sctx->parent_root) {
			/* unique */
			ret = 0;
			break;
		}

		di = btrfs_lookup_dir_item(NULL, sctx->parent_root,
				path, BTRFS_FIRST_FREE_OBJECTID,
				tmp, strlen(tmp), 0);
		btrfs_release_path(path);
		if (IS_ERR(di)) {
			ret = PTR_ERR(di);
			goto out;
		}
		if (di) {
			/* not unique, try again */
			idx++;
			continue;
		}
		/* unique */
		break;
	}

	ret = fs_path_add(dest, tmp, strlen(tmp));

out:
	btrfs_free_path(path);
	return ret;
}
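
/*
 * Orphan names produced above look like "o<ino>-<gen>-<idx>", e.g.
 * "o261-7-0" for inode 261 with generation 7 (made-up values for
 * illustration). The idx counter only grows when a candidate name collides
 * with an existing entry in the root directory of send_root or
 * parent_root, which practically never happens, but keeps the name unique
 * even then.
 */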

enum inode_state {
	inode_state_no_change,
	inode_state_will_create,
	inode_state_did_create,
	inode_state_will_delete,
	inode_state_did_delete,
};

static int get_cur_inode_state(struct send_ctx *sctx, u64 ino, u64 gen)
{
	int ret;
	int left_ret;
	int right_ret;
	u64 left_gen;
	u64 right_gen;

	ret = get_inode_info(sctx->send_root, ino, NULL, &left_gen, NULL, NULL,
			NULL, NULL);
	if (ret < 0 && ret != -ENOENT)
		goto out;
	left_ret = ret;

	if (!sctx->parent_root) {
		right_ret = -ENOENT;
	} else {
		ret = get_inode_info(sctx->parent_root, ino, NULL, &right_gen,
				NULL, NULL, NULL, NULL);
		if (ret < 0 && ret != -ENOENT)
			goto out;
		right_ret = ret;
	}

	if (!left_ret && !right_ret) {
		if (left_gen == gen && right_gen == gen) {
			ret = inode_state_no_change;
		} else if (left_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_create;
			else
				ret = inode_state_will_create;
		} else if (right_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_delete;
			else
				ret = inode_state_will_delete;
		} else {
			ret = -ENOENT;
		}
	} else if (!left_ret) {
		if (left_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_create;
			else
				ret = inode_state_will_create;
		} else {
			ret = -ENOENT;
		}
	} else if (!right_ret) {
		if (right_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_delete;
			else
				ret = inode_state_will_delete;
		} else {
			ret = -ENOENT;
		}
	} else {
		ret = -ENOENT;
	}

out:
	return ret;
}

static int is_inode_existent(struct send_ctx *sctx, u64 ino, u64 gen)
{
	int ret;

	if (ino == BTRFS_FIRST_FREE_OBJECTID)
		return 1;

	ret = get_cur_inode_state(sctx, ino, gen);
	if (ret < 0)
		goto out;

	if (ret == inode_state_no_change ||
	    ret == inode_state_did_create ||
	    ret == inode_state_will_delete)
		ret = 1;
	else
		ret = 0;

out:
	return ret;
}

/*
 * Helper function to lookup a dir item in a dir.
 */
static int lookup_dir_item_inode(struct btrfs_root *root,
				 u64 dir, const char *name, int name_len,
				 u64 *found_inode,
				 u8 *found_type)
{
	int ret = 0;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	struct btrfs_path *path;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	di = btrfs_lookup_dir_item(NULL, root, path,
			dir, name, name_len, 0);
	if (IS_ERR_OR_NULL(di)) {
		ret = di ? PTR_ERR(di) : -ENOENT;
		goto out;
	}
	btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
	if (key.type == BTRFS_ROOT_ITEM_KEY) {
		ret = -ENOENT;
		goto out;
	}
	*found_inode = key.objectid;
	*found_type = btrfs_dir_type(path->nodes[0], di);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Looks up the first btrfs_inode_ref of a given ino. It returns the parent dir,
 * generation of the parent dir and the name of the dir entry.
 */
static int get_first_ref(struct btrfs_root *root, u64 ino,
			 u64 *dir, u64 *dir_gen, struct fs_path *name)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;
	int len;
	u64 parent_dir;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = ino;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = 0;

	ret = btrfs_search_slot_for_read(root, &key, path, 1, 0);
	if (ret < 0)
		goto out;
	if (!ret)
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				path->slots[0]);
	if (ret || found_key.objectid != ino ||
	    (found_key.type != BTRFS_INODE_REF_KEY &&
	     found_key.type != BTRFS_INODE_EXTREF_KEY)) {
		ret = -ENOENT;
		goto out;
	}

	if (found_key.type == BTRFS_INODE_REF_KEY) {
		struct btrfs_inode_ref *iref;
		iref = btrfs_item_ptr(path->nodes[0], path->slots[0],
				      struct btrfs_inode_ref);
		len = btrfs_inode_ref_name_len(path->nodes[0], iref);
		ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
						     (unsigned long)(iref + 1),
						     len);
		parent_dir = found_key.offset;
	} else {
		struct btrfs_inode_extref *extref;
		extref = btrfs_item_ptr(path->nodes[0], path->slots[0],
					struct btrfs_inode_extref);
		len = btrfs_inode_extref_name_len(path->nodes[0], extref);
		ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
					(unsigned long)&extref->name, len);
		parent_dir = btrfs_inode_extref_parent(path->nodes[0], extref);
	}
	if (ret < 0)
		goto out;
	btrfs_release_path(path);

	if (dir_gen) {
		ret = get_inode_info(root, parent_dir, NULL, dir_gen, NULL,
				     NULL, NULL, NULL);
		if (ret < 0)
			goto out;
	}

	*dir = parent_dir;

out:
	btrfs_free_path(path);
	return ret;
}

static int is_first_ref(struct btrfs_root *root,
			u64 ino, u64 dir,
			const char *name, int name_len)
{
	int ret;
	struct fs_path *tmp_name;
	u64 tmp_dir;

	tmp_name = fs_path_alloc();
	if (!tmp_name)
		return -ENOMEM;

	ret = get_first_ref(root, ino, &tmp_dir, NULL, tmp_name);
	if (ret < 0)
		goto out;

	if (dir != tmp_dir || name_len != fs_path_len(tmp_name)) {
		ret = 0;
		goto out;
	}

	ret = !memcmp(tmp_name->start, name, name_len);

out:
	fs_path_free(tmp_name);
	return ret;
}

/*
 * Used by process_recorded_refs to determine if a new ref would overwrite an
 * already existing ref. In case it detects an overwrite, it returns the
 * inode/gen in who_ino/who_gen.
 * When an overwrite is detected, process_recorded_refs does proper orphanizing
 * to make sure later references to the overwritten inode are possible.
 * Orphanizing is however only required for the first ref of an inode.
 * process_recorded_refs does an additional is_first_ref check to see if
 * orphanizing is really required.
 */
static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen,
			      const char *name, int name_len,
			      u64 *who_ino, u64 *who_gen, u64 *who_mode)
{
	int ret = 0;
	u64 gen;
	u64 other_inode = 0;
	u8 other_type = 0;

	if (!sctx->parent_root)
		goto out;

	ret = is_inode_existent(sctx, dir, dir_gen);
	if (ret <= 0)
		goto out;

	/*
	 * If we have a parent root we need to verify that the parent dir was
	 * not deleted and then re-created, if it was then we have no overwrite
	 * and we can just unlink this entry.
	 */
	if (sctx->parent_root && dir != BTRFS_FIRST_FREE_OBJECTID) {
		ret = get_inode_info(sctx->parent_root, dir, NULL, &gen, NULL,
				     NULL, NULL, NULL);
		if (ret < 0 && ret != -ENOENT)
			goto out;
		if (ret) {
			ret = 0;
			goto out;
		}
		if (gen != dir_gen)
			goto out;
	}

	ret = lookup_dir_item_inode(sctx->parent_root, dir, name, name_len,
			&other_inode, &other_type);
	if (ret < 0 && ret != -ENOENT)
		goto out;
	if (ret) {
		ret = 0;
		goto out;
	}

	/*
	 * Check if the overwritten ref was already processed. If yes, the ref
	 * was already unlinked/moved, so we can safely assume that we will not
	 * overwrite anything at this point in time.
	 */
	if (other_inode > sctx->send_progress ||
	    is_waiting_for_move(sctx, other_inode)) {
		ret = get_inode_info(sctx->parent_root, other_inode, NULL,
				who_gen, who_mode, NULL, NULL, NULL);
		if (ret < 0)
			goto out;

		ret = 1;
		*who_ino = other_inode;
	} else {
		ret = 0;
	}

out:
	return ret;
}

/*
 * Checks if the ref was overwritten by an already processed inode. This is
 * used by __get_cur_name_and_parent to find out if the ref was orphanized and
 * thus the orphan name needs to be used.
 * process_recorded_refs also uses it to avoid unlinking of refs that were
 * overwritten.
 */
static int did_overwrite_ref(struct send_ctx *sctx,
			    u64 dir, u64 dir_gen,
			    u64 ino, u64 ino_gen,
			    const char *name, int name_len)
{
	int ret = 0;
	u64 gen;
	u64 ow_inode;
	u8 other_type;

	if (!sctx->parent_root)
		goto out;

	ret = is_inode_existent(sctx, dir, dir_gen);
	if (ret <= 0)
		goto out;

	if (dir != BTRFS_FIRST_FREE_OBJECTID) {
		ret = get_inode_info(sctx->send_root, dir, NULL, &gen, NULL,
				     NULL, NULL, NULL);
		if (ret < 0 && ret != -ENOENT)
			goto out;
		if (ret) {
			ret = 0;
			goto out;
		}
		if (gen != dir_gen)
			goto out;
	}

	/* check if the ref was overwritten by another ref */
	ret = lookup_dir_item_inode(sctx->send_root, dir, name, name_len,
			&ow_inode, &other_type);
	if (ret < 0 && ret != -ENOENT)
		goto out;
	if (ret) {
		/* was never and will never be overwritten */
		ret = 0;
		goto out;
	}

	ret = get_inode_info(sctx->send_root, ow_inode, NULL, &gen, NULL, NULL,
			NULL, NULL);
	if (ret < 0)
		goto out;

	if (ow_inode == ino && gen == ino_gen) {
		ret = 0;
		goto out;
	}

	/*
	 * We know that it is or will be overwritten. Check this now.
	 * The current inode being processed might have been the one that caused
	 * inode 'ino' to be orphanized, therefore check if ow_inode matches
	 * the current inode being processed.
	 */
	if ((ow_inode < sctx->send_progress) ||
	    (ino != sctx->cur_ino && ow_inode == sctx->cur_ino &&
	     gen == sctx->cur_inode_gen))
		ret = 1;
	else
		ret = 0;

out:
	return ret;
}

/*
 * Same as did_overwrite_ref, but also checks if it is the first ref of an inode
 * that got overwritten. This is used by process_recorded_refs to determine
 * if it has to use the path as returned by get_cur_path or the orphan name.
 */
static int did_overwrite_first_ref(struct send_ctx *sctx, u64 ino, u64 gen)
{
	int ret = 0;
	struct fs_path *name = NULL;
	u64 dir;
	u64 dir_gen;

	if (!sctx->parent_root)
		goto out;

	name = fs_path_alloc();
	if (!name)
		return -ENOMEM;

	ret = get_first_ref(sctx->parent_root, ino, &dir, &dir_gen, name);
	if (ret < 0)
		goto out;

	ret = did_overwrite_ref(sctx, dir, dir_gen, ino, gen,
			name->start, fs_path_len(name));

out:
	fs_path_free(name);
	return ret;
}

/*
 * Insert a name cache entry. On 32bit kernels the radix tree index is 32bit,
 * so we need to do some special handling in case we have clashes. This function
 * takes care of this with the help of name_cache_entry::radix_list.
 * In case of error, nce is kfreed.
 */
static int name_cache_insert(struct send_ctx *sctx,
			     struct name_cache_entry *nce)
{
	int ret = 0;
	struct list_head *nce_head;

	nce_head = radix_tree_lookup(&sctx->name_cache,
			(unsigned long)nce->ino);
	if (!nce_head) {
		nce_head = kmalloc(sizeof(*nce_head), GFP_KERNEL);
		if (!nce_head) {
			kfree(nce);
			return -ENOMEM;
		}
		INIT_LIST_HEAD(nce_head);

		ret = radix_tree_insert(&sctx->name_cache, nce->ino, nce_head);
		if (ret < 0) {
			kfree(nce_head);
			kfree(nce);
			return ret;
		}
	}
	list_add_tail(&nce->radix_list, nce_head);
	list_add_tail(&nce->list, &sctx->name_cache_list);
	sctx->name_cache_size++;

	return ret;
}
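
/*
 * Collision handling sketch (inode numbers made up for illustration): on a
 * 32bit kernel the radix tree index is an unsigned long, so inodes 5 and
 * 5 + 2^32 both map to index 5. Both name_cache_entry structures then end
 * up chained on the same nce_head list, and name_cache_search() below
 * disambiguates them by comparing the full 64bit ino and gen.
 */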

static void name_cache_delete(struct send_ctx *sctx,
			      struct name_cache_entry *nce)
{
	struct list_head *nce_head;

	nce_head = radix_tree_lookup(&sctx->name_cache,
			(unsigned long)nce->ino);
	if (!nce_head) {
		btrfs_err(sctx->send_root->fs_info,
	      "name_cache_delete lookup failed ino %llu cache size %d, leaking memory",
			nce->ino, sctx->name_cache_size);
	}

	list_del(&nce->radix_list);
	list_del(&nce->list);
	sctx->name_cache_size--;

	/*
	 * We may not get to the final release of nce_head if the lookup fails
	 */
	if (nce_head && list_empty(nce_head)) {
		radix_tree_delete(&sctx->name_cache, (unsigned long)nce->ino);
		kfree(nce_head);
	}
}

static struct name_cache_entry *name_cache_search(struct send_ctx *sctx,
						  u64 ino, u64 gen)
{
	struct list_head *nce_head;
	struct name_cache_entry *cur;

	nce_head = radix_tree_lookup(&sctx->name_cache, (unsigned long)ino);
	if (!nce_head)
		return NULL;

	list_for_each_entry(cur, nce_head, radix_list) {
		if (cur->ino == ino && cur->gen == gen)
			return cur;
	}
	return NULL;
}

/*
 * Removes the entry from the list and adds it back to the end. This marks the
 * entry as recently used so that name_cache_clean_unused does not remove it.
 */
static void name_cache_used(struct send_ctx *sctx, struct name_cache_entry *nce)
{
	list_del(&nce->list);
	list_add_tail(&nce->list, &sctx->name_cache_list);
}

/*
 * Remove some entries from the beginning of name_cache_list.
 */
static void name_cache_clean_unused(struct send_ctx *sctx)
{
	struct name_cache_entry *nce;

	if (sctx->name_cache_size < SEND_CTX_NAME_CACHE_CLEAN_SIZE)
		return;

	while (sctx->name_cache_size > SEND_CTX_MAX_NAME_CACHE_SIZE) {
		nce = list_entry(sctx->name_cache_list.next,
				struct name_cache_entry, list);
		name_cache_delete(sctx, nce);
		kfree(nce);
	}
}

static void name_cache_free(struct send_ctx *sctx)
{
	struct name_cache_entry *nce;

	while (!list_empty(&sctx->name_cache_list)) {
		nce = list_entry(sctx->name_cache_list.next,
				struct name_cache_entry, list);
		name_cache_delete(sctx, nce);
		kfree(nce);
	}
}

/*
 * Used by get_cur_path for each ref up to the root.
 * Returns 0 if it succeeded.
 * Returns 1 if the inode is not existent or got overwritten. In that case, the
 * name is an orphan name. This instructs get_cur_path to stop iterating. If 1
 * is returned, parent_ino/parent_gen are not guaranteed to be valid.
 * Returns <0 in case of error.
 */
static int __get_cur_name_and_parent(struct send_ctx *sctx,
				     u64 ino, u64 gen,
				     u64 *parent_ino,
				     u64 *parent_gen,
				     struct fs_path *dest)
{
	int ret;
	int nce_ret;
	struct name_cache_entry *nce = NULL;

	/*
	 * First check if we already did a call to this function with the same
	 * ino/gen. If yes, check if the cache entry is still up-to-date. If yes
	 * return the cached result.
	 */
	nce = name_cache_search(sctx, ino, gen);
	if (nce) {
		if (ino < sctx->send_progress && nce->need_later_update) {
			name_cache_delete(sctx, nce);
			kfree(nce);
			nce = NULL;
		} else {
			name_cache_used(sctx, nce);
			*parent_ino = nce->parent_ino;
			*parent_gen = nce->parent_gen;
			ret = fs_path_add(dest, nce->name, nce->name_len);
			if (ret < 0)
				goto out;
			ret = nce->ret;
			goto out;
		}
	}

	/*
	 * If the inode is not existent yet, add the orphan name and return 1.
	 * This should only happen for the parent dir that we determine in
	 * __record_new_ref
	 */
	ret = is_inode_existent(sctx, ino, gen);
	if (ret < 0)
		goto out;

	if (!ret) {
		ret = gen_unique_name(sctx, ino, gen, dest);
		if (ret < 0)
			goto out;
		ret = 1;
		goto out_cache;
	}

	/*
	 * Depending on whether the inode was already processed or not, use
	 * send_root or parent_root for ref lookup.
	 */
	if (ino < sctx->send_progress)
		ret = get_first_ref(sctx->send_root, ino,
				    parent_ino, parent_gen, dest);
	else
		ret = get_first_ref(sctx->parent_root, ino,
				    parent_ino, parent_gen, dest);
	if (ret < 0)
		goto out;

	/*
	 * Check if the ref was overwritten by an inode's ref that was processed
	 * earlier. If yes, treat as orphan and return 1.
	 */
	ret = did_overwrite_ref(sctx, *parent_ino, *parent_gen, ino, gen,
			dest->start, dest->end - dest->start);
	if (ret < 0)
		goto out;
	if (ret) {
		fs_path_reset(dest);
		ret = gen_unique_name(sctx, ino, gen, dest);
		if (ret < 0)
			goto out;
		ret = 1;
	}

out_cache:
	/*
	 * Store the result of the lookup in the name cache.
	 */
	nce = kmalloc(sizeof(*nce) + fs_path_len(dest) + 1, GFP_KERNEL);
	if (!nce) {
		ret = -ENOMEM;
		goto out;
	}

	nce->ino = ino;
	nce->gen = gen;
	nce->parent_ino = *parent_ino;
	nce->parent_gen = *parent_gen;
	nce->name_len = fs_path_len(dest);
	nce->ret = ret;
	strcpy(nce->name, dest->start);

	if (ino < sctx->send_progress)
		nce->need_later_update = 0;
	else
		nce->need_later_update = 1;

	nce_ret = name_cache_insert(sctx, nce);
	if (nce_ret < 0)
		ret = nce_ret;
	name_cache_clean_unused(sctx);

out:
	return ret;
}
2262 * Magic happens here. This function returns the first ref to an inode as it
2263 * would look like while receiving the stream at this point in time.
2264 * We walk the path up to the root. For every inode in between, we check if it
2265 * was already processed/sent. If yes, we continue with the parent as found
2266 * in send_root. If not, we continue with the parent as found in parent_root.
2267 * If we encounter an inode that was deleted at this point in time, we use the
2268 * inodes "orphan" name instead of the real name and stop. Same with new inodes
2269 * that were not created yet and overwritten inodes/refs.
2271 * When do we have orphan inodes:
2272 * 1. When an inode is freshly created and thus no valid refs are available yet
2273  * 2. When a directory lost all its refs (deleted) but still has dir items
2274  *    inside that were not processed yet (pending move/delete). If anyone
2275 * tried to get the path to the dir items, it would get a path inside that
2276 * orphan directory.
2277 * 3. When an inode is moved around or gets new links, it may overwrite the ref
2278 * of an unprocessed inode. If in that case the first ref would be
2279 * overwritten, the overwritten inode gets "orphanized". Later when we
2280 * process this overwritten inode, it is restored at a new place by moving
2281 * the orphan inode.
2283 * sctx->send_progress tells this function at which point in time receiving
2284 * would be.
2286 static int get_cur_path(struct send_ctx *sctx, u64 ino, u64 gen,
2287 struct fs_path *dest)
2289 int ret = 0;
2290 struct fs_path *name = NULL;
2291 u64 parent_inode = 0;
2292 u64 parent_gen = 0;
2293 int stop = 0;
2295 name = fs_path_alloc();
2296 if (!name) {
2297 ret = -ENOMEM;
2298 goto out;
2301 dest->reversed = 1;
2302 fs_path_reset(dest);
2304 while (!stop && ino != BTRFS_FIRST_FREE_OBJECTID) {
2305 struct waiting_dir_move *wdm;
2307 fs_path_reset(name);
2309 if (is_waiting_for_rm(sctx, ino)) {
2310 ret = gen_unique_name(sctx, ino, gen, name);
2311 if (ret < 0)
2312 goto out;
2313 ret = fs_path_add_path(dest, name);
2314 break;
2317 wdm = get_waiting_dir_move(sctx, ino);
2318 if (wdm && wdm->orphanized) {
2319 ret = gen_unique_name(sctx, ino, gen, name);
2320 stop = 1;
2321 } else if (wdm) {
2322 ret = get_first_ref(sctx->parent_root, ino,
2323 &parent_inode, &parent_gen, name);
2324 } else {
2325 ret = __get_cur_name_and_parent(sctx, ino, gen,
2326 &parent_inode,
2327 &parent_gen, name);
2328 if (ret)
2329 stop = 1;
2332 if (ret < 0)
2333 goto out;
2335 ret = fs_path_add_path(dest, name);
2336 if (ret < 0)
2337 goto out;
2339 ino = parent_inode;
2340 gen = parent_gen;
2343 out:
2344 fs_path_free(name);
2345 if (!ret)
2346 fs_path_unreverse(dest);
2347 return ret;
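/*
 * Illustration (hypothetical inode numbers): resolving ino 261 whose
 * ancestors are 260 and 259 adds components child-first into the reversed
 * fs_path, e.g. "file", then "b", then "a"; fs_path_unreverse() turns that
 * into "a/b/file" once the walk reaches BTRFS_FIRST_FREE_OBJECTID.
 */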
2351 * Sends a BTRFS_SEND_C_SUBVOL command/item to userspace
2353 static int send_subvol_begin(struct send_ctx *sctx)
2355 int ret;
2356 struct btrfs_root *send_root = sctx->send_root;
2357 struct btrfs_root *parent_root = sctx->parent_root;
2358 struct btrfs_path *path;
2359 struct btrfs_key key;
2360 struct btrfs_root_ref *ref;
2361 struct extent_buffer *leaf;
2362 char *name = NULL;
2363 int namelen;
2365 path = btrfs_alloc_path();
2366 if (!path)
2367 return -ENOMEM;
2369 name = kmalloc(BTRFS_PATH_NAME_MAX, GFP_KERNEL);
2370 if (!name) {
2371 btrfs_free_path(path);
2372 return -ENOMEM;
2375 key.objectid = send_root->root_key.objectid;
2376 key.type = BTRFS_ROOT_BACKREF_KEY;
2377 key.offset = 0;
2379 ret = btrfs_search_slot_for_read(send_root->fs_info->tree_root,
2380 &key, path, 1, 0);
2381 if (ret < 0)
2382 goto out;
2383 if (ret) {
2384 ret = -ENOENT;
2385 goto out;
2388 leaf = path->nodes[0];
2389 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2390 if (key.type != BTRFS_ROOT_BACKREF_KEY ||
2391 key.objectid != send_root->root_key.objectid) {
2392 ret = -ENOENT;
2393 goto out;
2395 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
2396 namelen = btrfs_root_ref_name_len(leaf, ref);
2397 read_extent_buffer(leaf, name, (unsigned long)(ref + 1), namelen);
2398 btrfs_release_path(path);
2400 if (parent_root) {
2401 ret = begin_cmd(sctx, BTRFS_SEND_C_SNAPSHOT);
2402 if (ret < 0)
2403 goto out;
2404 } else {
2405 ret = begin_cmd(sctx, BTRFS_SEND_C_SUBVOL);
2406 if (ret < 0)
2407 goto out;
2410 TLV_PUT_STRING(sctx, BTRFS_SEND_A_PATH, name, namelen);
2412 if (!btrfs_is_empty_uuid(sctx->send_root->root_item.received_uuid))
2413 TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID,
2414 sctx->send_root->root_item.received_uuid);
2415 else
2416 TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID,
2417 sctx->send_root->root_item.uuid);
2419 TLV_PUT_U64(sctx, BTRFS_SEND_A_CTRANSID,
2420 le64_to_cpu(sctx->send_root->root_item.ctransid));
2421 if (parent_root) {
2422 if (!btrfs_is_empty_uuid(parent_root->root_item.received_uuid))
2423 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
2424 parent_root->root_item.received_uuid);
2425 else
2426 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
2427 parent_root->root_item.uuid);
2428 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
2429 le64_to_cpu(sctx->parent_root->root_item.ctransid));
2432 ret = send_cmd(sctx);
2434 tlv_put_failure:
2435 out:
2436 btrfs_free_path(path);
2437 kfree(name);
2438 return ret;
2441 static int send_truncate(struct send_ctx *sctx, u64 ino, u64 gen, u64 size)
2443 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2444 int ret = 0;
2445 struct fs_path *p;
2447 btrfs_debug(fs_info, "send_truncate %llu size=%llu", ino, size);
2449 p = fs_path_alloc();
2450 if (!p)
2451 return -ENOMEM;
2453 ret = begin_cmd(sctx, BTRFS_SEND_C_TRUNCATE);
2454 if (ret < 0)
2455 goto out;
2457 ret = get_cur_path(sctx, ino, gen, p);
2458 if (ret < 0)
2459 goto out;
2460 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2461 TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, size);
2463 ret = send_cmd(sctx);
2465 tlv_put_failure:
2466 out:
2467 fs_path_free(p);
2468 return ret;
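/*
 * send_truncate() is the template that the small attribute commands below
 * (chmod, chown, utimes) all follow:
 *
 *	ret = begin_cmd(sctx, BTRFS_SEND_C_XXX);
 *	ret = get_cur_path(sctx, ino, gen, p);
 *	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
 *	TLV_PUT_...(sctx, <attribute>, <value>);
 *	ret = send_cmd(sctx);
 *
 * where the TLV_PUT* macros jump to tlv_put_failure on error.
 */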
2471 static int send_chmod(struct send_ctx *sctx, u64 ino, u64 gen, u64 mode)
2473 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2474 int ret = 0;
2475 struct fs_path *p;
2477 btrfs_debug(fs_info, "send_chmod %llu mode=%llu", ino, mode);
2479 p = fs_path_alloc();
2480 if (!p)
2481 return -ENOMEM;
2483 ret = begin_cmd(sctx, BTRFS_SEND_C_CHMOD);
2484 if (ret < 0)
2485 goto out;
2487 ret = get_cur_path(sctx, ino, gen, p);
2488 if (ret < 0)
2489 goto out;
2490 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2491 TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode & 07777);
2493 ret = send_cmd(sctx);
2495 tlv_put_failure:
2496 out:
2497 fs_path_free(p);
2498 return ret;
2501 static int send_chown(struct send_ctx *sctx, u64 ino, u64 gen, u64 uid, u64 gid)
2503 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2504 int ret = 0;
2505 struct fs_path *p;
2507 btrfs_debug(fs_info, "send_chown %llu uid=%llu, gid=%llu",
2508 ino, uid, gid);
2510 p = fs_path_alloc();
2511 if (!p)
2512 return -ENOMEM;
2514 ret = begin_cmd(sctx, BTRFS_SEND_C_CHOWN);
2515 if (ret < 0)
2516 goto out;
2518 ret = get_cur_path(sctx, ino, gen, p);
2519 if (ret < 0)
2520 goto out;
2521 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2522 TLV_PUT_U64(sctx, BTRFS_SEND_A_UID, uid);
2523 TLV_PUT_U64(sctx, BTRFS_SEND_A_GID, gid);
2525 ret = send_cmd(sctx);
2527 tlv_put_failure:
2528 out:
2529 fs_path_free(p);
2530 return ret;
2533 static int send_utimes(struct send_ctx *sctx, u64 ino, u64 gen)
2535 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2536 int ret = 0;
2537 struct fs_path *p = NULL;
2538 struct btrfs_inode_item *ii;
2539 struct btrfs_path *path = NULL;
2540 struct extent_buffer *eb;
2541 struct btrfs_key key;
2542 int slot;
2544 btrfs_debug(fs_info, "send_utimes %llu", ino);
2546 p = fs_path_alloc();
2547 if (!p)
2548 return -ENOMEM;
2550 path = alloc_path_for_send();
2551 if (!path) {
2552 ret = -ENOMEM;
2553 goto out;
2556 key.objectid = ino;
2557 key.type = BTRFS_INODE_ITEM_KEY;
2558 key.offset = 0;
2559 ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0);
2560 if (ret > 0)
2561 ret = -ENOENT;
2562 if (ret < 0)
2563 goto out;
2565 eb = path->nodes[0];
2566 slot = path->slots[0];
2567 ii = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
2569 ret = begin_cmd(sctx, BTRFS_SEND_C_UTIMES);
2570 if (ret < 0)
2571 goto out;
2573 ret = get_cur_path(sctx, ino, gen, p);
2574 if (ret < 0)
2575 goto out;
2576 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2577 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_ATIME, eb, &ii->atime);
2578 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_MTIME, eb, &ii->mtime);
2579 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_CTIME, eb, &ii->ctime);
2580 /* TODO Add otime support when the otime patches get into upstream */
2582 ret = send_cmd(sctx);
2584 tlv_put_failure:
2585 out:
2586 fs_path_free(p);
2587 btrfs_free_path(path);
2588 return ret;
2592  * Sends a BTRFS_SEND_C_MKXXX or SYMLINK command to user space. We don't have
2593  * a valid path yet because we have not processed the refs yet. So the
2594  * inode is created as an orphan.
2596 static int send_create_inode(struct send_ctx *sctx, u64 ino)
2598 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2599 int ret = 0;
2600 struct fs_path *p;
2601 int cmd;
2602 u64 gen;
2603 u64 mode;
2604 u64 rdev;
2606 btrfs_debug(fs_info, "send_create_inode %llu", ino);
2608 p = fs_path_alloc();
2609 if (!p)
2610 return -ENOMEM;
2612 if (ino != sctx->cur_ino) {
2613 ret = get_inode_info(sctx->send_root, ino, NULL, &gen, &mode,
2614 NULL, NULL, &rdev);
2615 if (ret < 0)
2616 goto out;
2617 } else {
2618 gen = sctx->cur_inode_gen;
2619 mode = sctx->cur_inode_mode;
2620 rdev = sctx->cur_inode_rdev;
2623 if (S_ISREG(mode)) {
2624 cmd = BTRFS_SEND_C_MKFILE;
2625 } else if (S_ISDIR(mode)) {
2626 cmd = BTRFS_SEND_C_MKDIR;
2627 } else if (S_ISLNK(mode)) {
2628 cmd = BTRFS_SEND_C_SYMLINK;
2629 } else if (S_ISCHR(mode) || S_ISBLK(mode)) {
2630 cmd = BTRFS_SEND_C_MKNOD;
2631 } else if (S_ISFIFO(mode)) {
2632 cmd = BTRFS_SEND_C_MKFIFO;
2633 } else if (S_ISSOCK(mode)) {
2634 cmd = BTRFS_SEND_C_MKSOCK;
2635 } else {
2636  btrfs_warn(fs_info, "unexpected inode type %o",
2637 (int)(mode & S_IFMT));
2638 ret = -EOPNOTSUPP;
2639 goto out;
2642 ret = begin_cmd(sctx, cmd);
2643 if (ret < 0)
2644 goto out;
2646 ret = gen_unique_name(sctx, ino, gen, p);
2647 if (ret < 0)
2648 goto out;
2650 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2651 TLV_PUT_U64(sctx, BTRFS_SEND_A_INO, ino);
2653 if (S_ISLNK(mode)) {
2654 fs_path_reset(p);
2655 ret = read_symlink(sctx->send_root, ino, p);
2656 if (ret < 0)
2657 goto out;
2658 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, p);
2659 } else if (S_ISCHR(mode) || S_ISBLK(mode) ||
2660 S_ISFIFO(mode) || S_ISSOCK(mode)) {
2661 TLV_PUT_U64(sctx, BTRFS_SEND_A_RDEV, new_encode_dev(rdev));
2662 TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode);
2665 ret = send_cmd(sctx);
2666 if (ret < 0)
2667 goto out;
2670 tlv_put_failure:
2671 out:
2672 fs_path_free(p);
2673 return ret;
2677 * We need some special handling for inodes that get processed before the parent
2678 * directory got created. See process_recorded_refs for details.
2679  * This function checks whether we already created the dir out of order.
2681 static int did_create_dir(struct send_ctx *sctx, u64 dir)
2683 int ret = 0;
2684 struct btrfs_path *path = NULL;
2685 struct btrfs_key key;
2686 struct btrfs_key found_key;
2687 struct btrfs_key di_key;
2688 struct extent_buffer *eb;
2689 struct btrfs_dir_item *di;
2690 int slot;
2692 path = alloc_path_for_send();
2693 if (!path) {
2694 ret = -ENOMEM;
2695 goto out;
2698 key.objectid = dir;
2699 key.type = BTRFS_DIR_INDEX_KEY;
2700 key.offset = 0;
2701 ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0);
2702 if (ret < 0)
2703 goto out;
2705 while (1) {
2706 eb = path->nodes[0];
2707 slot = path->slots[0];
2708 if (slot >= btrfs_header_nritems(eb)) {
2709 ret = btrfs_next_leaf(sctx->send_root, path);
2710 if (ret < 0) {
2711 goto out;
2712 } else if (ret > 0) {
2713 ret = 0;
2714 break;
2716 continue;
2719 btrfs_item_key_to_cpu(eb, &found_key, slot);
2720 if (found_key.objectid != key.objectid ||
2721 found_key.type != key.type) {
2722 ret = 0;
2723 goto out;
2726 di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
2727 btrfs_dir_item_key_to_cpu(eb, di, &di_key);
2729 if (di_key.type != BTRFS_ROOT_ITEM_KEY &&
2730 di_key.objectid < sctx->send_progress) {
2731 ret = 1;
2732 goto out;
2735 path->slots[0]++;
2738 out:
2739 btrfs_free_path(path);
2740 return ret;
2744 * Only creates the inode if it is:
2745 * 1. Not a directory
2746 * 2. Or a directory which was not created already due to out of order
2747 * directories. See did_create_dir and process_recorded_refs for details.
2749 static int send_create_inode_if_needed(struct send_ctx *sctx)
2751 int ret;
2753 if (S_ISDIR(sctx->cur_inode_mode)) {
2754 ret = did_create_dir(sctx, sctx->cur_ino);
2755 if (ret < 0)
2756 goto out;
2757 if (ret) {
2758 ret = 0;
2759 goto out;
2763 ret = send_create_inode(sctx, sctx->cur_ino);
2764 if (ret < 0)
2765 goto out;
2767 out:
2768 return ret;
2771 struct recorded_ref {
2772 struct list_head list;
2773 char *name;
2774 struct fs_path *full_path;
2775 u64 dir;
2776 u64 dir_gen;
2777 int name_len;
2780 static void set_ref_path(struct recorded_ref *ref, struct fs_path *path)
2782 ref->full_path = path;
2783 ref->name = (char *)kbasename(ref->full_path->start);
2784 ref->name_len = ref->full_path->end - ref->name;
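/*
 * Example: for a full_path of "a/b/c", kbasename() leaves ref->name
 * pointing at the "c" inside the very same buffer and ref->name_len
 * becomes 1; no separate allocation is made for the name.
 */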
2788 * We need to process new refs before deleted refs, but compare_tree gives us
2789 * everything mixed. So we first record all refs and later process them.
2790 * This function is a helper to record one ref.
2792 static int __record_ref(struct list_head *head, u64 dir,
2793 u64 dir_gen, struct fs_path *path)
2795 struct recorded_ref *ref;
2797 ref = kmalloc(sizeof(*ref), GFP_KERNEL);
2798 if (!ref)
2799 return -ENOMEM;
2801 ref->dir = dir;
2802 ref->dir_gen = dir_gen;
2803 set_ref_path(ref, path);
2804 list_add_tail(&ref->list, head);
2805 return 0;
2808 static int dup_ref(struct recorded_ref *ref, struct list_head *list)
2810 struct recorded_ref *new;
2812  new = kmalloc(sizeof(*new), GFP_KERNEL);
2813 if (!new)
2814 return -ENOMEM;
2816 new->dir = ref->dir;
2817 new->dir_gen = ref->dir_gen;
2818 new->full_path = NULL;
2819 INIT_LIST_HEAD(&new->list);
2820 list_add_tail(&new->list, list);
2821 return 0;
2824 static void __free_recorded_refs(struct list_head *head)
2826 struct recorded_ref *cur;
2828 while (!list_empty(head)) {
2829 cur = list_entry(head->next, struct recorded_ref, list);
2830 fs_path_free(cur->full_path);
2831 list_del(&cur->list);
2832 kfree(cur);
2836 static void free_recorded_refs(struct send_ctx *sctx)
2838 __free_recorded_refs(&sctx->new_refs);
2839 __free_recorded_refs(&sctx->deleted_refs);
2843 * Renames/moves a file/dir to its orphan name. Used when the first
2844  * ref of an unprocessed inode gets overwritten and for all non-empty
2845 * directories.
2847 static int orphanize_inode(struct send_ctx *sctx, u64 ino, u64 gen,
2848 struct fs_path *path)
2850 int ret;
2851 struct fs_path *orphan;
2853 orphan = fs_path_alloc();
2854 if (!orphan)
2855 return -ENOMEM;
2857 ret = gen_unique_name(sctx, ino, gen, orphan);
2858 if (ret < 0)
2859 goto out;
2861 ret = send_rename(sctx, path, orphan);
2863 out:
2864 fs_path_free(orphan);
2865 return ret;
2868 static struct orphan_dir_info *
2869 add_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)
2871 struct rb_node **p = &sctx->orphan_dirs.rb_node;
2872 struct rb_node *parent = NULL;
2873 struct orphan_dir_info *entry, *odi;
2875 while (*p) {
2876 parent = *p;
2877 entry = rb_entry(parent, struct orphan_dir_info, node);
2878 if (dir_ino < entry->ino) {
2879 p = &(*p)->rb_left;
2880 } else if (dir_ino > entry->ino) {
2881 p = &(*p)->rb_right;
2882 } else {
2883 return entry;
2887 odi = kmalloc(sizeof(*odi), GFP_KERNEL);
2888 if (!odi)
2889 return ERR_PTR(-ENOMEM);
2890 odi->ino = dir_ino;
2891 odi->gen = 0;
2892 odi->last_dir_index_offset = 0;
2894 rb_link_node(&odi->node, parent, p);
2895 rb_insert_color(&odi->node, &sctx->orphan_dirs);
2896 return odi;
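/*
 * The same rbtree idiom is used for every tree in this file that is keyed
 * by an inode number (orphan_dirs, waiting_dir_moves, pending_dir_moves):
 * walk the rb_node child pointers comparing keys, then attach a new node
 * with rb_link_node() followed by rb_insert_color().
 */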
2899 static struct orphan_dir_info *
2900 get_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)
2902 struct rb_node *n = sctx->orphan_dirs.rb_node;
2903 struct orphan_dir_info *entry;
2905 while (n) {
2906 entry = rb_entry(n, struct orphan_dir_info, node);
2907 if (dir_ino < entry->ino)
2908 n = n->rb_left;
2909 else if (dir_ino > entry->ino)
2910 n = n->rb_right;
2911 else
2912 return entry;
2914 return NULL;
2917 static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino)
2919 struct orphan_dir_info *odi = get_orphan_dir_info(sctx, dir_ino);
2921 return odi != NULL;
2924 static void free_orphan_dir_info(struct send_ctx *sctx,
2925 struct orphan_dir_info *odi)
2927 if (!odi)
2928 return;
2929 rb_erase(&odi->node, &sctx->orphan_dirs);
2930 kfree(odi);
2934 * Returns 1 if a directory can be removed at this point in time.
2935 * We check this by iterating all dir items and checking if the inode behind
2936 * the dir item was already processed.
2938 static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen,
2939 u64 send_progress)
2941 int ret = 0;
2942 struct btrfs_root *root = sctx->parent_root;
2943 struct btrfs_path *path;
2944 struct btrfs_key key;
2945 struct btrfs_key found_key;
2946 struct btrfs_key loc;
2947 struct btrfs_dir_item *di;
2948 struct orphan_dir_info *odi = NULL;
2951 * Don't try to rmdir the top/root subvolume dir.
2953 if (dir == BTRFS_FIRST_FREE_OBJECTID)
2954 return 0;
2956 path = alloc_path_for_send();
2957 if (!path)
2958 return -ENOMEM;
2960 key.objectid = dir;
2961 key.type = BTRFS_DIR_INDEX_KEY;
2962 key.offset = 0;
2964 odi = get_orphan_dir_info(sctx, dir);
2965 if (odi)
2966 key.offset = odi->last_dir_index_offset;
2968 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2969 if (ret < 0)
2970 goto out;
2972 while (1) {
2973 struct waiting_dir_move *dm;
2975 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
2976 ret = btrfs_next_leaf(root, path);
2977 if (ret < 0)
2978 goto out;
2979 else if (ret > 0)
2980 break;
2981 continue;
2983 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
2984 path->slots[0]);
2985 if (found_key.objectid != key.objectid ||
2986 found_key.type != key.type)
2987 break;
2989 di = btrfs_item_ptr(path->nodes[0], path->slots[0],
2990 struct btrfs_dir_item);
2991 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &loc);
2993 dm = get_waiting_dir_move(sctx, loc.objectid);
2994 if (dm) {
2995 odi = add_orphan_dir_info(sctx, dir);
2996 if (IS_ERR(odi)) {
2997 ret = PTR_ERR(odi);
2998 goto out;
3000 odi->gen = dir_gen;
3001 odi->last_dir_index_offset = found_key.offset;
3002 dm->rmdir_ino = dir;
3003 ret = 0;
3004 goto out;
3007 if (loc.objectid > send_progress) {
3008 odi = add_orphan_dir_info(sctx, dir);
3009 if (IS_ERR(odi)) {
3010 ret = PTR_ERR(odi);
3011 goto out;
3013 odi->gen = dir_gen;
3014 odi->last_dir_index_offset = found_key.offset;
3015 ret = 0;
3016 goto out;
3019 path->slots[0]++;
3021 free_orphan_dir_info(sctx, odi);
3023 ret = 1;
3025 out:
3026 btrfs_free_path(path);
3027 return ret;
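/*
 * Example (hypothetical numbers): if dir 300 still contains an entry for
 * inode 310 while send_progress is 305, can_rmdir() returns 0 and records
 * an orphan_dir_info, so a later retry can resume the scan at
 * last_dir_index_offset instead of rescanning from offset 0.
 */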
3030 static int is_waiting_for_move(struct send_ctx *sctx, u64 ino)
3032 struct waiting_dir_move *entry = get_waiting_dir_move(sctx, ino);
3034 return entry != NULL;
3037 static int add_waiting_dir_move(struct send_ctx *sctx, u64 ino, bool orphanized)
3039 struct rb_node **p = &sctx->waiting_dir_moves.rb_node;
3040 struct rb_node *parent = NULL;
3041 struct waiting_dir_move *entry, *dm;
3043 dm = kmalloc(sizeof(*dm), GFP_KERNEL);
3044 if (!dm)
3045 return -ENOMEM;
3046 dm->ino = ino;
3047 dm->rmdir_ino = 0;
3048 dm->orphanized = orphanized;
3050 while (*p) {
3051 parent = *p;
3052 entry = rb_entry(parent, struct waiting_dir_move, node);
3053 if (ino < entry->ino) {
3054 p = &(*p)->rb_left;
3055 } else if (ino > entry->ino) {
3056 p = &(*p)->rb_right;
3057 } else {
3058 kfree(dm);
3059 return -EEXIST;
3063 rb_link_node(&dm->node, parent, p);
3064 rb_insert_color(&dm->node, &sctx->waiting_dir_moves);
3065 return 0;
3068 static struct waiting_dir_move *
3069 get_waiting_dir_move(struct send_ctx *sctx, u64 ino)
3071 struct rb_node *n = sctx->waiting_dir_moves.rb_node;
3072 struct waiting_dir_move *entry;
3074 while (n) {
3075 entry = rb_entry(n, struct waiting_dir_move, node);
3076 if (ino < entry->ino)
3077 n = n->rb_left;
3078 else if (ino > entry->ino)
3079 n = n->rb_right;
3080 else
3081 return entry;
3083 return NULL;
3086 static void free_waiting_dir_move(struct send_ctx *sctx,
3087 struct waiting_dir_move *dm)
3089 if (!dm)
3090 return;
3091 rb_erase(&dm->node, &sctx->waiting_dir_moves);
3092 kfree(dm);
3095 static int add_pending_dir_move(struct send_ctx *sctx,
3096 u64 ino,
3097 u64 ino_gen,
3098 u64 parent_ino,
3099 struct list_head *new_refs,
3100 struct list_head *deleted_refs,
3101 const bool is_orphan)
3103 struct rb_node **p = &sctx->pending_dir_moves.rb_node;
3104 struct rb_node *parent = NULL;
3105 struct pending_dir_move *entry = NULL, *pm;
3106 struct recorded_ref *cur;
3107 int exists = 0;
3108 int ret;
3110 pm = kmalloc(sizeof(*pm), GFP_KERNEL);
3111 if (!pm)
3112 return -ENOMEM;
3113 pm->parent_ino = parent_ino;
3114 pm->ino = ino;
3115 pm->gen = ino_gen;
3116 INIT_LIST_HEAD(&pm->list);
3117 INIT_LIST_HEAD(&pm->update_refs);
3118 RB_CLEAR_NODE(&pm->node);
3120 while (*p) {
3121 parent = *p;
3122 entry = rb_entry(parent, struct pending_dir_move, node);
3123 if (parent_ino < entry->parent_ino) {
3124 p = &(*p)->rb_left;
3125 } else if (parent_ino > entry->parent_ino) {
3126 p = &(*p)->rb_right;
3127 } else {
3128 exists = 1;
3129 break;
3133 list_for_each_entry(cur, deleted_refs, list) {
3134 ret = dup_ref(cur, &pm->update_refs);
3135 if (ret < 0)
3136 goto out;
3138 list_for_each_entry(cur, new_refs, list) {
3139 ret = dup_ref(cur, &pm->update_refs);
3140 if (ret < 0)
3141 goto out;
3144 ret = add_waiting_dir_move(sctx, pm->ino, is_orphan);
3145 if (ret)
3146 goto out;
3148 if (exists) {
3149 list_add_tail(&pm->list, &entry->list);
3150 } else {
3151 rb_link_node(&pm->node, parent, p);
3152 rb_insert_color(&pm->node, &sctx->pending_dir_moves);
3154 ret = 0;
3155 out:
3156 if (ret) {
3157 __free_recorded_refs(&pm->update_refs);
3158 kfree(pm);
3160 return ret;
3163 static struct pending_dir_move *get_pending_dir_moves(struct send_ctx *sctx,
3164 u64 parent_ino)
3166 struct rb_node *n = sctx->pending_dir_moves.rb_node;
3167 struct pending_dir_move *entry;
3169 while (n) {
3170 entry = rb_entry(n, struct pending_dir_move, node);
3171 if (parent_ino < entry->parent_ino)
3172 n = n->rb_left;
3173 else if (parent_ino > entry->parent_ino)
3174 n = n->rb_right;
3175 else
3176 return entry;
3178 return NULL;
3181 static int path_loop(struct send_ctx *sctx, struct fs_path *name,
3182 u64 ino, u64 gen, u64 *ancestor_ino)
3184 int ret = 0;
3185 u64 parent_inode = 0;
3186 u64 parent_gen = 0;
3187 u64 start_ino = ino;
3189 *ancestor_ino = 0;
3190 while (ino != BTRFS_FIRST_FREE_OBJECTID) {
3191 fs_path_reset(name);
3193 if (is_waiting_for_rm(sctx, ino))
3194 break;
3195 if (is_waiting_for_move(sctx, ino)) {
3196 if (*ancestor_ino == 0)
3197 *ancestor_ino = ino;
3198 ret = get_first_ref(sctx->parent_root, ino,
3199 &parent_inode, &parent_gen, name);
3200 } else {
3201 ret = __get_cur_name_and_parent(sctx, ino, gen,
3202 &parent_inode,
3203 &parent_gen, name);
3204 if (ret > 0) {
3205 ret = 0;
3206 break;
3209 if (ret < 0)
3210 break;
3211 if (parent_inode == start_ino) {
3212 ret = 1;
3213 if (*ancestor_ino == 0)
3214 *ancestor_ino = ino;
3215 break;
3217 ino = parent_inode;
3218 gen = parent_gen;
3220 return ret;
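/*
 * path_loop() returns 1 when walking up from ino reaches start_ino again,
 * i.e. performing the pending rename right now would create a cyclic path
 * (a directory moved below one of its own descendants). The caller then
 * re-queues the move behind *ancestor_ino.
 */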
3223 static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
3225 struct fs_path *from_path = NULL;
3226 struct fs_path *to_path = NULL;
3227 struct fs_path *name = NULL;
3228 u64 orig_progress = sctx->send_progress;
3229 struct recorded_ref *cur;
3230 u64 parent_ino, parent_gen;
3231 struct waiting_dir_move *dm = NULL;
3232 u64 rmdir_ino = 0;
3233 u64 ancestor;
3234 bool is_orphan;
3235 int ret;
3237 name = fs_path_alloc();
3238 from_path = fs_path_alloc();
3239 if (!name || !from_path) {
3240 ret = -ENOMEM;
3241 goto out;
3244 dm = get_waiting_dir_move(sctx, pm->ino);
3245 ASSERT(dm);
3246 rmdir_ino = dm->rmdir_ino;
3247 is_orphan = dm->orphanized;
3248 free_waiting_dir_move(sctx, dm);
3250 if (is_orphan) {
3251 ret = gen_unique_name(sctx, pm->ino,
3252 pm->gen, from_path);
3253 } else {
3254 ret = get_first_ref(sctx->parent_root, pm->ino,
3255 &parent_ino, &parent_gen, name);
3256 if (ret < 0)
3257 goto out;
3258 ret = get_cur_path(sctx, parent_ino, parent_gen,
3259 from_path);
3260 if (ret < 0)
3261 goto out;
3262 ret = fs_path_add_path(from_path, name);
3264 if (ret < 0)
3265 goto out;
3267 sctx->send_progress = sctx->cur_ino + 1;
3268 ret = path_loop(sctx, name, pm->ino, pm->gen, &ancestor);
3269 if (ret < 0)
3270 goto out;
3271 if (ret) {
3272 LIST_HEAD(deleted_refs);
3273 ASSERT(ancestor > BTRFS_FIRST_FREE_OBJECTID);
3274 ret = add_pending_dir_move(sctx, pm->ino, pm->gen, ancestor,
3275 &pm->update_refs, &deleted_refs,
3276 is_orphan);
3277 if (ret < 0)
3278 goto out;
3279 if (rmdir_ino) {
3280 dm = get_waiting_dir_move(sctx, pm->ino);
3281 ASSERT(dm);
3282 dm->rmdir_ino = rmdir_ino;
3284 goto out;
3286 fs_path_reset(name);
3287 to_path = name;
3288 name = NULL;
3289 ret = get_cur_path(sctx, pm->ino, pm->gen, to_path);
3290 if (ret < 0)
3291 goto out;
3293 ret = send_rename(sctx, from_path, to_path);
3294 if (ret < 0)
3295 goto out;
3297 if (rmdir_ino) {
3298 struct orphan_dir_info *odi;
3299 u64 gen;
3301 odi = get_orphan_dir_info(sctx, rmdir_ino);
3302 if (!odi) {
3303 /* already deleted */
3304 goto finish;
3306 gen = odi->gen;
3308 ret = can_rmdir(sctx, rmdir_ino, gen, sctx->cur_ino);
3309 if (ret < 0)
3310 goto out;
3311 if (!ret)
3312 goto finish;
3314 name = fs_path_alloc();
3315 if (!name) {
3316 ret = -ENOMEM;
3317 goto out;
3319 ret = get_cur_path(sctx, rmdir_ino, gen, name);
3320 if (ret < 0)
3321 goto out;
3322 ret = send_rmdir(sctx, name);
3323 if (ret < 0)
3324 goto out;
3327 finish:
3328 ret = send_utimes(sctx, pm->ino, pm->gen);
3329 if (ret < 0)
3330 goto out;
3333 * After rename/move, need to update the utimes of both new parent(s)
3334 * and old parent(s).
3336 list_for_each_entry(cur, &pm->update_refs, list) {
3338 * The parent inode might have been deleted in the send snapshot
3340 ret = get_inode_info(sctx->send_root, cur->dir, NULL,
3341 NULL, NULL, NULL, NULL, NULL);
3342 if (ret == -ENOENT) {
3343 ret = 0;
3344 continue;
3346 if (ret < 0)
3347 goto out;
3349 ret = send_utimes(sctx, cur->dir, cur->dir_gen);
3350 if (ret < 0)
3351 goto out;
3354 out:
3355 fs_path_free(name);
3356 fs_path_free(from_path);
3357 fs_path_free(to_path);
3358 sctx->send_progress = orig_progress;
3360 return ret;
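/*
 * Note the send_progress dance in apply_dir_move(): it is temporarily
 * advanced to cur_ino + 1 so that path resolution for the rename sees the
 * tree as it will look once the current inode is fully processed, and it
 * is restored to orig_progress before returning.
 */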
3363 static void free_pending_move(struct send_ctx *sctx, struct pending_dir_move *m)
3365 if (!list_empty(&m->list))
3366 list_del(&m->list);
3367 if (!RB_EMPTY_NODE(&m->node))
3368 rb_erase(&m->node, &sctx->pending_dir_moves);
3369 __free_recorded_refs(&m->update_refs);
3370 kfree(m);
3373 static void tail_append_pending_moves(struct send_ctx *sctx,
3374 struct pending_dir_move *moves,
3375 struct list_head *stack)
3377 if (list_empty(&moves->list)) {
3378 list_add_tail(&moves->list, stack);
3379 } else {
3380 LIST_HEAD(list);
3381 list_splice_init(&moves->list, &list);
3382 list_add_tail(&moves->list, stack);
3383 list_splice_tail(&list, stack);
3385 if (!RB_EMPTY_NODE(&moves->node)) {
3386 rb_erase(&moves->node, &sctx->pending_dir_moves);
3387 RB_CLEAR_NODE(&moves->node);
3391 static int apply_children_dir_moves(struct send_ctx *sctx)
3393 struct pending_dir_move *pm;
3394 struct list_head stack;
3395 u64 parent_ino = sctx->cur_ino;
3396 int ret = 0;
3398 pm = get_pending_dir_moves(sctx, parent_ino);
3399 if (!pm)
3400 return 0;
3402 INIT_LIST_HEAD(&stack);
3403 tail_append_pending_moves(sctx, pm, &stack);
3405 while (!list_empty(&stack)) {
3406 pm = list_first_entry(&stack, struct pending_dir_move, list);
3407 parent_ino = pm->ino;
3408 ret = apply_dir_move(sctx, pm);
3409 free_pending_move(sctx, pm);
3410 if (ret)
3411 goto out;
3412 pm = get_pending_dir_moves(sctx, parent_ino);
3413 if (pm)
3414 tail_append_pending_moves(sctx, pm, &stack);
3416 return 0;
3418 out:
3419 while (!list_empty(&stack)) {
3420 pm = list_first_entry(&stack, struct pending_dir_move, list);
3421 free_pending_move(sctx, pm);
3423 return ret;
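/*
 * The "stack" above is effectively a FIFO worklist: applying a move may
 * unblock moves that were queued under the inode just moved, and
 * tail_append_pending_moves() splices those onto the end of the list so
 * they get applied in turn.
 */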
3427 * We might need to delay a directory rename even when no ancestor directory
3428 * (in the send root) with a higher inode number than ours (sctx->cur_ino) was
3429 * renamed. This happens when we rename a directory to the old name (the name
3430 * in the parent root) of some other unrelated directory that got its rename
3431  * delayed due to some ancestor with a higher inode number that got renamed.
3433 * Example:
3435 * Parent snapshot:
3436 * . (ino 256)
3437 * |---- a/ (ino 257)
3438 * | |---- file (ino 260)
3440 * |---- b/ (ino 258)
3441 * |---- c/ (ino 259)
3443 * Send snapshot:
3444 * . (ino 256)
3445 * |---- a/ (ino 258)
3446 * |---- x/ (ino 259)
3447 * |---- y/ (ino 257)
3448 * |----- file (ino 260)
3450  * Here we cannot rename 258 from 'b' to 'a' without the rename of inode 257
3451 * from 'a' to 'x/y' happening first, which in turn depends on the rename of
3452 * inode 259 from 'c' to 'x'. So the order of rename commands the send stream
3453 * must issue is:
3455 * 1 - rename 259 from 'c' to 'x'
3456 * 2 - rename 257 from 'a' to 'x/y'
3457 * 3 - rename 258 from 'b' to 'a'
3459 * Returns 1 if the rename of sctx->cur_ino needs to be delayed, 0 if it can
3460 * be done right away and < 0 on error.
3462 static int wait_for_dest_dir_move(struct send_ctx *sctx,
3463 struct recorded_ref *parent_ref,
3464 const bool is_orphan)
3466 struct btrfs_fs_info *fs_info = sctx->parent_root->fs_info;
3467 struct btrfs_path *path;
3468 struct btrfs_key key;
3469 struct btrfs_key di_key;
3470 struct btrfs_dir_item *di;
3471 u64 left_gen;
3472 u64 right_gen;
3473 int ret = 0;
3474 struct waiting_dir_move *wdm;
3476 if (RB_EMPTY_ROOT(&sctx->waiting_dir_moves))
3477 return 0;
3479 path = alloc_path_for_send();
3480 if (!path)
3481 return -ENOMEM;
3483 key.objectid = parent_ref->dir;
3484 key.type = BTRFS_DIR_ITEM_KEY;
3485 key.offset = btrfs_name_hash(parent_ref->name, parent_ref->name_len);
3487 ret = btrfs_search_slot(NULL, sctx->parent_root, &key, path, 0, 0);
3488 if (ret < 0) {
3489 goto out;
3490 } else if (ret > 0) {
3491 ret = 0;
3492 goto out;
3495 di = btrfs_match_dir_item_name(fs_info, path, parent_ref->name,
3496 parent_ref->name_len);
3497 if (!di) {
3498 ret = 0;
3499 goto out;
3502 * di_key.objectid has the number of the inode that has a dentry in the
3503 * parent directory with the same name that sctx->cur_ino is being
3504 * renamed to. We need to check if that inode is in the send root as
3505 * well and if it is currently marked as an inode with a pending rename,
3506 * if it is, we need to delay the rename of sctx->cur_ino as well, so
3507 * that it happens after that other inode is renamed.
3509 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &di_key);
3510 if (di_key.type != BTRFS_INODE_ITEM_KEY) {
3511 ret = 0;
3512 goto out;
3515 ret = get_inode_info(sctx->parent_root, di_key.objectid, NULL,
3516 &left_gen, NULL, NULL, NULL, NULL);
3517 if (ret < 0)
3518 goto out;
3519 ret = get_inode_info(sctx->send_root, di_key.objectid, NULL,
3520 &right_gen, NULL, NULL, NULL, NULL);
3521 if (ret < 0) {
3522 if (ret == -ENOENT)
3523 ret = 0;
3524 goto out;
3527 /* Different inode, no need to delay the rename of sctx->cur_ino */
3528 if (right_gen != left_gen) {
3529 ret = 0;
3530 goto out;
3533 wdm = get_waiting_dir_move(sctx, di_key.objectid);
3534 if (wdm && !wdm->orphanized) {
3535 ret = add_pending_dir_move(sctx,
3536 sctx->cur_ino,
3537 sctx->cur_inode_gen,
3538 di_key.objectid,
3539 &sctx->new_refs,
3540 &sctx->deleted_refs,
3541 is_orphan);
3542 if (!ret)
3543 ret = 1;
3545 out:
3546 btrfs_free_path(path);
3547 return ret;
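/*
 * In the example from the comment above, this is what delays the rename
 * of inode 258 ('b' -> 'a'): the dir item lookup in the parent root shows
 * that the name 'a' currently belongs to inode 257, that inode exists in
 * both roots with the same generation, and it has a pending
 * (non-orphanized) move, so 258's rename is queued behind it.
 */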
3551 * Check if inode ino2, or any of its ancestors, is inode ino1.
3552 * Return 1 if true, 0 if false and < 0 on error.
3554 static int check_ino_in_path(struct btrfs_root *root,
3555 const u64 ino1,
3556 const u64 ino1_gen,
3557 const u64 ino2,
3558 const u64 ino2_gen,
3559 struct fs_path *fs_path)
3561 u64 ino = ino2;
3563 if (ino1 == ino2)
3564 return ino1_gen == ino2_gen;
3566 while (ino > BTRFS_FIRST_FREE_OBJECTID) {
3567 u64 parent;
3568 u64 parent_gen;
3569 int ret;
3571 fs_path_reset(fs_path);
3572 ret = get_first_ref(root, ino, &parent, &parent_gen, fs_path);
3573 if (ret < 0)
3574 return ret;
3575 if (parent == ino1)
3576 return parent_gen == ino1_gen;
3577 ino = parent;
3579 return 0;
3583  * Check if inode ino1 is an ancestor of inode ino2 in the given root for any
3584 * possible path (in case ino2 is not a directory and has multiple hard links).
3585 * Return 1 if true, 0 if false and < 0 on error.
3587 static int is_ancestor(struct btrfs_root *root,
3588 const u64 ino1,
3589 const u64 ino1_gen,
3590 const u64 ino2,
3591 struct fs_path *fs_path)
3593 bool free_fs_path = false;
3594 int ret = 0;
3595 struct btrfs_path *path = NULL;
3596 struct btrfs_key key;
3598 if (!fs_path) {
3599 fs_path = fs_path_alloc();
3600 if (!fs_path)
3601 return -ENOMEM;
3602 free_fs_path = true;
3605 path = alloc_path_for_send();
3606 if (!path) {
3607 ret = -ENOMEM;
3608 goto out;
3611 key.objectid = ino2;
3612 key.type = BTRFS_INODE_REF_KEY;
3613 key.offset = 0;
3615 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3616 if (ret < 0)
3617 goto out;
3619 while (true) {
3620 struct extent_buffer *leaf = path->nodes[0];
3621 int slot = path->slots[0];
3622 u32 cur_offset = 0;
3623 u32 item_size;
3625 if (slot >= btrfs_header_nritems(leaf)) {
3626 ret = btrfs_next_leaf(root, path);
3627 if (ret < 0)
3628 goto out;
3629 if (ret > 0)
3630 break;
3631 continue;
3634 btrfs_item_key_to_cpu(leaf, &key, slot);
3635 if (key.objectid != ino2)
3636 break;
3637 if (key.type != BTRFS_INODE_REF_KEY &&
3638 key.type != BTRFS_INODE_EXTREF_KEY)
3639 break;
3641 item_size = btrfs_item_size_nr(leaf, slot);
3642 while (cur_offset < item_size) {
3643 u64 parent;
3644 u64 parent_gen;
3646 if (key.type == BTRFS_INODE_EXTREF_KEY) {
3647 unsigned long ptr;
3648 struct btrfs_inode_extref *extref;
3650 ptr = btrfs_item_ptr_offset(leaf, slot);
3651 extref = (struct btrfs_inode_extref *)
3652 (ptr + cur_offset);
3653 parent = btrfs_inode_extref_parent(leaf,
3654 extref);
3655 cur_offset += sizeof(*extref);
3656 cur_offset += btrfs_inode_extref_name_len(leaf,
3657 extref);
3658 } else {
3659 parent = key.offset;
3660 cur_offset = item_size;
3663 ret = get_inode_info(root, parent, NULL, &parent_gen,
3664 NULL, NULL, NULL, NULL);
3665 if (ret < 0)
3666 goto out;
3667 ret = check_ino_in_path(root, ino1, ino1_gen,
3668 parent, parent_gen, fs_path);
3669 if (ret)
3670 goto out;
3672 path->slots[0]++;
3674 ret = 0;
3675 out:
3676 btrfs_free_path(path);
3677 if (free_fs_path)
3678 fs_path_free(fs_path);
3679 return ret;
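/*
 * The inner loop above handles both ref item layouts: INODE_REF items
 * encode the parent directory in the key offset, while INODE_EXTREF items
 * pack one or more (parent, name_len, name) records back to back in a
 * single item, which is why cur_offset advances by
 * sizeof(*extref) + name_len per record.
 */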
3682 static int wait_for_parent_move(struct send_ctx *sctx,
3683 struct recorded_ref *parent_ref,
3684 const bool is_orphan)
3686 int ret = 0;
3687 u64 ino = parent_ref->dir;
3688 u64 ino_gen = parent_ref->dir_gen;
3689 u64 parent_ino_before, parent_ino_after;
3690 struct fs_path *path_before = NULL;
3691 struct fs_path *path_after = NULL;
3692 int len1, len2;
3694 path_after = fs_path_alloc();
3695 path_before = fs_path_alloc();
3696 if (!path_after || !path_before) {
3697 ret = -ENOMEM;
3698 goto out;
3702 * Our current directory inode may not yet be renamed/moved because some
3703  * ancestor (immediate or not) has to be renamed/moved first. So find out
3704  * if such an ancestor exists and make sure our own rename/move happens after
3705 * that ancestor is processed to avoid path build infinite loops (done
3706 * at get_cur_path()).
3708 while (ino > BTRFS_FIRST_FREE_OBJECTID) {
3709 u64 parent_ino_after_gen;
3711 if (is_waiting_for_move(sctx, ino)) {
3713 * If the current inode is an ancestor of ino in the
3714 * parent root, we need to delay the rename of the
3715  * current inode, otherwise don't delay the rename
3716 * because we can end up with a circular dependency
3717 * of renames, resulting in some directories never
3718 * getting the respective rename operations issued in
3719 * the send stream or getting into infinite path build
3720 * loops.
3722 ret = is_ancestor(sctx->parent_root,
3723 sctx->cur_ino, sctx->cur_inode_gen,
3724 ino, path_before);
3725 if (ret)
3726 break;
3729 fs_path_reset(path_before);
3730 fs_path_reset(path_after);
3732 ret = get_first_ref(sctx->send_root, ino, &parent_ino_after,
3733 &parent_ino_after_gen, path_after);
3734 if (ret < 0)
3735 goto out;
3736 ret = get_first_ref(sctx->parent_root, ino, &parent_ino_before,
3737 NULL, path_before);
3738 if (ret < 0 && ret != -ENOENT) {
3739 goto out;
3740 } else if (ret == -ENOENT) {
3741 ret = 0;
3742 break;
3745 len1 = fs_path_len(path_before);
3746 len2 = fs_path_len(path_after);
3747 if (ino > sctx->cur_ino &&
3748 (parent_ino_before != parent_ino_after || len1 != len2 ||
3749 memcmp(path_before->start, path_after->start, len1))) {
3750 u64 parent_ino_gen;
3752 ret = get_inode_info(sctx->parent_root, ino, NULL,
3753 &parent_ino_gen, NULL, NULL, NULL,
3754 NULL);
3755 if (ret < 0)
3756 goto out;
3757 if (ino_gen == parent_ino_gen) {
3758 ret = 1;
3759 break;
3762 ino = parent_ino_after;
3763 ino_gen = parent_ino_after_gen;
3766 out:
3767 fs_path_free(path_before);
3768 fs_path_free(path_after);
3770 if (ret == 1) {
3771 ret = add_pending_dir_move(sctx,
3772 sctx->cur_ino,
3773 sctx->cur_inode_gen,
3774 ino,
3775 &sctx->new_refs,
3776 &sctx->deleted_refs,
3777 is_orphan);
3778 if (!ret)
3779 ret = 1;
3782 return ret;
3785 static int update_ref_path(struct send_ctx *sctx, struct recorded_ref *ref)
3787 int ret;
3788 struct fs_path *new_path;
3791  * Our reference's name member points to its full_path member string, so
3792  * we use a new path here.
3794 new_path = fs_path_alloc();
3795 if (!new_path)
3796 return -ENOMEM;
3798 ret = get_cur_path(sctx, ref->dir, ref->dir_gen, new_path);
3799 if (ret < 0) {
3800 fs_path_free(new_path);
3801 return ret;
3803 ret = fs_path_add(new_path, ref->name, ref->name_len);
3804 if (ret < 0) {
3805 fs_path_free(new_path);
3806 return ret;
3809 fs_path_free(ref->full_path);
3810 set_ref_path(ref, new_path);
3812 return 0;
3816 * This does all the move/link/unlink/rmdir magic.
3818 static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
3820 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
3821 int ret = 0;
3822 struct recorded_ref *cur;
3823 struct recorded_ref *cur2;
3824 struct list_head check_dirs;
3825 struct fs_path *valid_path = NULL;
3826 u64 ow_inode = 0;
3827 u64 ow_gen;
3828 u64 ow_mode;
3829 int did_overwrite = 0;
3830 int is_orphan = 0;
3831 u64 last_dir_ino_rm = 0;
3832 bool can_rename = true;
3833 bool orphanized_dir = false;
3834 bool orphanized_ancestor = false;
3836 btrfs_debug(fs_info, "process_recorded_refs %llu", sctx->cur_ino);
3839 * This should never happen as the root dir always has the same ref
3840 * which is always '..'
3842 BUG_ON(sctx->cur_ino <= BTRFS_FIRST_FREE_OBJECTID);
3843 INIT_LIST_HEAD(&check_dirs);
3845 valid_path = fs_path_alloc();
3846 if (!valid_path) {
3847 ret = -ENOMEM;
3848 goto out;
3852 * First, check if the first ref of the current inode was overwritten
3853 * before. If yes, we know that the current inode was already orphanized
3854 * and thus use the orphan name. If not, we can use get_cur_path to
3855  * get the path of the first ref as it would look like while receiving at
3856 * this point in time.
3857 * New inodes are always orphan at the beginning, so force to use the
3858 * orphan name in this case.
3859 * The first ref is stored in valid_path and will be updated if it
3860 * gets moved around.
3862 if (!sctx->cur_inode_new) {
3863 ret = did_overwrite_first_ref(sctx, sctx->cur_ino,
3864 sctx->cur_inode_gen);
3865 if (ret < 0)
3866 goto out;
3867 if (ret)
3868 did_overwrite = 1;
3870 if (sctx->cur_inode_new || did_overwrite) {
3871 ret = gen_unique_name(sctx, sctx->cur_ino,
3872 sctx->cur_inode_gen, valid_path);
3873 if (ret < 0)
3874 goto out;
3875 is_orphan = 1;
3876 } else {
3877 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen,
3878 valid_path);
3879 if (ret < 0)
3880 goto out;
3883 list_for_each_entry(cur, &sctx->new_refs, list) {
3885 * We may have refs where the parent directory does not exist
3886  * yet. This happens if the parent directory's inum is higher
3887 * than the current inum. To handle this case, we create the
3888 * parent directory out of order. But we need to check if this
3889 * did already happen before due to other refs in the same dir.
3891 ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
3892 if (ret < 0)
3893 goto out;
3894 if (ret == inode_state_will_create) {
3895 ret = 0;
3897 * First check if any of the current inodes refs did
3898 * already create the dir.
3900 list_for_each_entry(cur2, &sctx->new_refs, list) {
3901 if (cur == cur2)
3902 break;
3903 if (cur2->dir == cur->dir) {
3904 ret = 1;
3905 break;
3910 * If that did not happen, check if a previous inode
3911 * did already create the dir.
3913 if (!ret)
3914 ret = did_create_dir(sctx, cur->dir);
3915 if (ret < 0)
3916 goto out;
3917 if (!ret) {
3918 ret = send_create_inode(sctx, cur->dir);
3919 if (ret < 0)
3920 goto out;
3925 * Check if this new ref would overwrite the first ref of
3926 * another unprocessed inode. If yes, orphanize the
3927 * overwritten inode. If we find an overwritten ref that is
3928 * not the first ref, simply unlink it.
3930 ret = will_overwrite_ref(sctx, cur->dir, cur->dir_gen,
3931 cur->name, cur->name_len,
3932 &ow_inode, &ow_gen, &ow_mode);
3933 if (ret < 0)
3934 goto out;
3935 if (ret) {
3936 ret = is_first_ref(sctx->parent_root,
3937 ow_inode, cur->dir, cur->name,
3938 cur->name_len);
3939 if (ret < 0)
3940 goto out;
3941 if (ret) {
3942 struct name_cache_entry *nce;
3943 struct waiting_dir_move *wdm;
3945 ret = orphanize_inode(sctx, ow_inode, ow_gen,
3946 cur->full_path);
3947 if (ret < 0)
3948 goto out;
3949 if (S_ISDIR(ow_mode))
3950 orphanized_dir = true;
3953 * If ow_inode has its rename operation delayed
3954 * make sure that its orphanized name is used in
3955 * the source path when performing its rename
3956 * operation.
3958 if (is_waiting_for_move(sctx, ow_inode)) {
3959 wdm = get_waiting_dir_move(sctx,
3960 ow_inode);
3961 ASSERT(wdm);
3962 wdm->orphanized = true;
3966 * Make sure we clear our orphanized inode's
3967 * name from the name cache. This is because the
3968 * inode ow_inode might be an ancestor of some
3969 * other inode that will be orphanized as well
3970 * later and has an inode number greater than
3971 * sctx->send_progress. We need to prevent
3972 * future name lookups from using the old name
3973  * and make them use the orphan name instead.
3975 nce = name_cache_search(sctx, ow_inode, ow_gen);
3976 if (nce) {
3977 name_cache_delete(sctx, nce);
3978 kfree(nce);
3982 * ow_inode might currently be an ancestor of
3983 * cur_ino, therefore compute valid_path (the
3984 * current path of cur_ino) again because it
3985 * might contain the pre-orphanization name of
3986 * ow_inode, which is no longer valid.
3988 ret = is_ancestor(sctx->parent_root,
3989 ow_inode, ow_gen,
3990 sctx->cur_ino, NULL);
3991 if (ret > 0) {
3992 orphanized_ancestor = true;
3993 fs_path_reset(valid_path);
3994 ret = get_cur_path(sctx, sctx->cur_ino,
3995 sctx->cur_inode_gen,
3996 valid_path);
3998 if (ret < 0)
3999 goto out;
4000 } else {
4001 ret = send_unlink(sctx, cur->full_path);
4002 if (ret < 0)
4003 goto out;
4007 if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root) {
4008 ret = wait_for_dest_dir_move(sctx, cur, is_orphan);
4009 if (ret < 0)
4010 goto out;
4011 if (ret == 1) {
4012 can_rename = false;
4013 *pending_move = 1;
4017 if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root &&
4018 can_rename) {
4019 ret = wait_for_parent_move(sctx, cur, is_orphan);
4020 if (ret < 0)
4021 goto out;
4022 if (ret == 1) {
4023 can_rename = false;
4024 *pending_move = 1;
4029 * link/move the ref to the new place. If we have an orphan
4030 * inode, move it and update valid_path. If not, link or move
4031 * it depending on the inode mode.
4033 if (is_orphan && can_rename) {
4034 ret = send_rename(sctx, valid_path, cur->full_path);
4035 if (ret < 0)
4036 goto out;
4037 is_orphan = 0;
4038 ret = fs_path_copy(valid_path, cur->full_path);
4039 if (ret < 0)
4040 goto out;
4041 } else if (can_rename) {
4042 if (S_ISDIR(sctx->cur_inode_mode)) {
4044 * Dirs can't be linked, so move it. For moved
4045 * dirs, we always have one new and one deleted
4046 * ref. The deleted ref is ignored later.
4048 ret = send_rename(sctx, valid_path,
4049 cur->full_path);
4050 if (!ret)
4051 ret = fs_path_copy(valid_path,
4052 cur->full_path);
4053 if (ret < 0)
4054 goto out;
4055 } else {
4057 * We might have previously orphanized an inode
4058 * which is an ancestor of our current inode,
4059 * so our reference's full path, which was
4060 * computed before any such orphanizations, must
4061 * be updated.
4063 if (orphanized_dir) {
4064 ret = update_ref_path(sctx, cur);
4065 if (ret < 0)
4066 goto out;
4068 ret = send_link(sctx, cur->full_path,
4069 valid_path);
4070 if (ret < 0)
4071 goto out;
4074 ret = dup_ref(cur, &check_dirs);
4075 if (ret < 0)
4076 goto out;
4079 if (S_ISDIR(sctx->cur_inode_mode) && sctx->cur_inode_deleted) {
4081 * Check if we can already rmdir the directory. If not,
4082 * orphanize it. For every dir item inside that gets deleted
4083 * later, we do this check again and rmdir it then if possible.
4084 * See the use of check_dirs for more details.
4086 ret = can_rmdir(sctx, sctx->cur_ino, sctx->cur_inode_gen,
4087 sctx->cur_ino);
4088 if (ret < 0)
4089 goto out;
4090 if (ret) {
4091 ret = send_rmdir(sctx, valid_path);
4092 if (ret < 0)
4093 goto out;
4094 } else if (!is_orphan) {
4095 ret = orphanize_inode(sctx, sctx->cur_ino,
4096 sctx->cur_inode_gen, valid_path);
4097 if (ret < 0)
4098 goto out;
4099 is_orphan = 1;
4102 list_for_each_entry(cur, &sctx->deleted_refs, list) {
4103 ret = dup_ref(cur, &check_dirs);
4104 if (ret < 0)
4105 goto out;
4107 } else if (S_ISDIR(sctx->cur_inode_mode) &&
4108 !list_empty(&sctx->deleted_refs)) {
4110 * We have a moved dir. Add the old parent to check_dirs
4112 cur = list_entry(sctx->deleted_refs.next, struct recorded_ref,
4113 list);
4114 ret = dup_ref(cur, &check_dirs);
4115 if (ret < 0)
4116 goto out;
4117 } else if (!S_ISDIR(sctx->cur_inode_mode)) {
4119 * We have a non dir inode. Go through all deleted refs and
4120 * unlink them if they were not already overwritten by other
4121 * inodes.
4123 list_for_each_entry(cur, &sctx->deleted_refs, list) {
4124 ret = did_overwrite_ref(sctx, cur->dir, cur->dir_gen,
4125 sctx->cur_ino, sctx->cur_inode_gen,
4126 cur->name, cur->name_len);
4127 if (ret < 0)
4128 goto out;
4129 if (!ret) {
4131 * If we orphanized any ancestor before, we need
4132 * to recompute the full path for deleted names,
4133 * since any such path was computed before we
4134 * processed any references and orphanized any
4135 * ancestor inode.
4137 if (orphanized_ancestor) {
4138 ret = update_ref_path(sctx, cur);
4139 if (ret < 0)
4140 goto out;
4142 ret = send_unlink(sctx, cur->full_path);
4143 if (ret < 0)
4144 goto out;
4146 ret = dup_ref(cur, &check_dirs);
4147 if (ret < 0)
4148 goto out;
4151  * If the inode is still an orphan, unlink the orphan. This may
4152 * happen when a previous inode did overwrite the first ref
4153 * of this inode and no new refs were added for the current
4154 * inode. Unlinking does not mean that the inode is deleted in
4155 * all cases. There may still be links to this inode in other
4156 * places.
4158 if (is_orphan) {
4159 ret = send_unlink(sctx, valid_path);
4160 if (ret < 0)
4161 goto out;
4166  * We collected all parent dirs where cur_inode was once located. We
4167 * now go through all these dirs and check if they are pending for
4168 * deletion and if it's finally possible to perform the rmdir now.
4169 * We also update the inode stats of the parent dirs here.
4171 list_for_each_entry(cur, &check_dirs, list) {
4173 * In case we had refs into dirs that were not processed yet,
4174 * we don't need to do the utime and rmdir logic for these dirs.
4175 * The dir will be processed later.
4177 if (cur->dir > sctx->cur_ino)
4178 continue;
4180 ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
4181 if (ret < 0)
4182 goto out;
4184 if (ret == inode_state_did_create ||
4185 ret == inode_state_no_change) {
4186 /* TODO delayed utimes */
4187 ret = send_utimes(sctx, cur->dir, cur->dir_gen);
4188 if (ret < 0)
4189 goto out;
4190 } else if (ret == inode_state_did_delete &&
4191 cur->dir != last_dir_ino_rm) {
4192 ret = can_rmdir(sctx, cur->dir, cur->dir_gen,
4193 sctx->cur_ino);
4194 if (ret < 0)
4195 goto out;
4196 if (ret) {
4197 ret = get_cur_path(sctx, cur->dir,
4198 cur->dir_gen, valid_path);
4199 if (ret < 0)
4200 goto out;
4201 ret = send_rmdir(sctx, valid_path);
4202 if (ret < 0)
4203 goto out;
4204 last_dir_ino_rm = cur->dir;
4209 ret = 0;
4211 out:
4212 __free_recorded_refs(&check_dirs);
4213 free_recorded_refs(sctx);
4214 fs_path_free(valid_path);
4215 return ret;
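/*
 * High-level shape of process_recorded_refs():
 *
 * 1. Compute valid_path, using the orphan name for new or overwritten
 *    inodes.
 * 2. For each new ref: create missing parent dirs, orphanize or unlink
 *    overwritten refs, then rename/link into place unless the move must
 *    be delayed (pending_move).
 * 3. For deleted refs: rmdir or orphanize deleted directories, or unlink
 *    stale file refs.
 * 4. For every parent dir collected in check_dirs: send utimes and retry
 *    rmdir for directories pending deletion.
 */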
4218 static int record_ref(struct btrfs_root *root, u64 dir, struct fs_path *name,
4219 void *ctx, struct list_head *refs)
4221 int ret = 0;
4222 struct send_ctx *sctx = ctx;
4223 struct fs_path *p;
4224 u64 gen;
4226 p = fs_path_alloc();
4227 if (!p)
4228 return -ENOMEM;
4230 ret = get_inode_info(root, dir, NULL, &gen, NULL, NULL,
4231 NULL, NULL);
4232 if (ret < 0)
4233 goto out;
4235 ret = get_cur_path(sctx, dir, gen, p);
4236 if (ret < 0)
4237 goto out;
4238 ret = fs_path_add_path(p, name);
4239 if (ret < 0)
4240 goto out;
4242 ret = __record_ref(refs, dir, gen, p);
4244 out:
4245 if (ret)
4246 fs_path_free(p);
4247 return ret;
4250 static int __record_new_ref(int num, u64 dir, int index,
4251 struct fs_path *name,
4252 void *ctx)
4254 struct send_ctx *sctx = ctx;
4255 return record_ref(sctx->send_root, dir, name, ctx, &sctx->new_refs);
4259 static int __record_deleted_ref(int num, u64 dir, int index,
4260 struct fs_path *name,
4261 void *ctx)
4263 struct send_ctx *sctx = ctx;
4264 return record_ref(sctx->parent_root, dir, name, ctx,
4265 &sctx->deleted_refs);
4268 static int record_new_ref(struct send_ctx *sctx)
4270 int ret;
4272 ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
4273 sctx->cmp_key, 0, __record_new_ref, sctx);
4274 if (ret < 0)
4275 goto out;
4276 ret = 0;
4278 out:
4279 return ret;
4282 static int record_deleted_ref(struct send_ctx *sctx)
4284 int ret;
4286 ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
4287 sctx->cmp_key, 0, __record_deleted_ref, sctx);
4288 if (ret < 0)
4289 goto out;
4290 ret = 0;
4292 out:
4293 return ret;
4296 struct find_ref_ctx {
4297 u64 dir;
4298 u64 dir_gen;
4299 struct btrfs_root *root;
4300 struct fs_path *name;
4301 int found_idx;
4304 static int __find_iref(int num, u64 dir, int index,
4305 struct fs_path *name,
4306 void *ctx_)
4308 struct find_ref_ctx *ctx = ctx_;
4309 u64 dir_gen;
4310 int ret;
4312 if (dir == ctx->dir && fs_path_len(name) == fs_path_len(ctx->name) &&
4313 strncmp(name->start, ctx->name->start, fs_path_len(name)) == 0) {
4315 * To avoid doing extra lookups we'll only do this if everything
4316 * else matches.
4318 ret = get_inode_info(ctx->root, dir, NULL, &dir_gen, NULL,
4319 NULL, NULL, NULL);
4320 if (ret)
4321 return ret;
4322 if (dir_gen != ctx->dir_gen)
4323 return 0;
4324 ctx->found_idx = num;
4325 return 1;
4327 return 0;
4330 static int find_iref(struct btrfs_root *root,
4331 struct btrfs_path *path,
4332 struct btrfs_key *key,
4333 u64 dir, u64 dir_gen, struct fs_path *name)
4335 int ret;
4336 struct find_ref_ctx ctx;
4338 ctx.dir = dir;
4339 ctx.name = name;
4340 ctx.dir_gen = dir_gen;
4341 ctx.found_idx = -1;
4342 ctx.root = root;
4344 ret = iterate_inode_ref(root, path, key, 0, __find_iref, &ctx);
4345 if (ret < 0)
4346 return ret;
4348 if (ctx.found_idx == -1)
4349 return -ENOENT;
4351 return ctx.found_idx;
4354 static int __record_changed_new_ref(int num, u64 dir, int index,
4355 struct fs_path *name,
4356 void *ctx)
4358 u64 dir_gen;
4359 int ret;
4360 struct send_ctx *sctx = ctx;
4362 ret = get_inode_info(sctx->send_root, dir, NULL, &dir_gen, NULL,
4363 NULL, NULL, NULL);
4364 if (ret)
4365 return ret;
4367 ret = find_iref(sctx->parent_root, sctx->right_path,
4368 sctx->cmp_key, dir, dir_gen, name);
4369 if (ret == -ENOENT)
4370 ret = __record_new_ref(num, dir, index, name, sctx);
4371 else if (ret > 0)
4372 ret = 0;
4374 return ret;
4377 static int __record_changed_deleted_ref(int num, u64 dir, int index,
4378 struct fs_path *name,
4379 void *ctx)
4381 u64 dir_gen;
4382 int ret;
4383 struct send_ctx *sctx = ctx;
4385 ret = get_inode_info(sctx->parent_root, dir, NULL, &dir_gen, NULL,
4386 NULL, NULL, NULL);
4387 if (ret)
4388 return ret;
4390 ret = find_iref(sctx->send_root, sctx->left_path, sctx->cmp_key,
4391 dir, dir_gen, name);
4392 if (ret == -ENOENT)
4393 ret = __record_deleted_ref(num, dir, index, name, sctx);
4394 else if (ret > 0)
4395 ret = 0;
4397 return ret;
4400 static int record_changed_ref(struct send_ctx *sctx)
4402 int ret = 0;
4404 ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
4405 sctx->cmp_key, 0, __record_changed_new_ref, sctx);
4406 if (ret < 0)
4407 goto out;
4408 ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
4409 sctx->cmp_key, 0, __record_changed_deleted_ref, sctx);
4410 if (ret < 0)
4411 goto out;
4412 ret = 0;
4414 out:
4415 return ret;
4419 * Record and process all refs at once. Needed when an inode changes the
4420 * generation number, which means that it was deleted and recreated.
4422 static int process_all_refs(struct send_ctx *sctx,
4423 enum btrfs_compare_tree_result cmd)
4425 int ret;
4426 struct btrfs_root *root;
4427 struct btrfs_path *path;
4428 struct btrfs_key key;
4429 struct btrfs_key found_key;
4430 struct extent_buffer *eb;
4431 int slot;
4432 iterate_inode_ref_t cb;
4433 int pending_move = 0;
4435 path = alloc_path_for_send();
4436 if (!path)
4437 return -ENOMEM;
4439 if (cmd == BTRFS_COMPARE_TREE_NEW) {
4440 root = sctx->send_root;
4441 cb = __record_new_ref;
4442 } else if (cmd == BTRFS_COMPARE_TREE_DELETED) {
4443 root = sctx->parent_root;
4444 cb = __record_deleted_ref;
4445 } else {
4446 btrfs_err(sctx->send_root->fs_info,
4447 "Wrong command %d in process_all_refs", cmd);
4448 ret = -EINVAL;
4449 goto out;
4452 key.objectid = sctx->cmp_key->objectid;
4453 key.type = BTRFS_INODE_REF_KEY;
4454 key.offset = 0;
4455 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4456 if (ret < 0)
4457 goto out;
4459 while (1) {
4460 eb = path->nodes[0];
4461 slot = path->slots[0];
4462 if (slot >= btrfs_header_nritems(eb)) {
4463 ret = btrfs_next_leaf(root, path);
4464 if (ret < 0)
4465 goto out;
4466 else if (ret > 0)
4467 break;
4468 continue;
4471 btrfs_item_key_to_cpu(eb, &found_key, slot);
4473 if (found_key.objectid != key.objectid ||
4474 (found_key.type != BTRFS_INODE_REF_KEY &&
4475 found_key.type != BTRFS_INODE_EXTREF_KEY))
4476 break;
4478 ret = iterate_inode_ref(root, path, &found_key, 0, cb, sctx);
4479 if (ret < 0)
4480 goto out;
4482 path->slots[0]++;
4484 btrfs_release_path(path);
4487 * We don't actually care about pending_move as we are simply
4488  * re-creating this inode and will be renaming it into place once we
4489 * rename the parent directory.
4491 ret = process_recorded_refs(sctx, &pending_move);
4492 out:
4493 btrfs_free_path(path);
4494 return ret;
4497 static int send_set_xattr(struct send_ctx *sctx,
4498 struct fs_path *path,
4499 const char *name, int name_len,
4500 const char *data, int data_len)
4502 int ret = 0;
4504 ret = begin_cmd(sctx, BTRFS_SEND_C_SET_XATTR);
4505 if (ret < 0)
4506 goto out;
4508 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
4509 TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);
4510 TLV_PUT(sctx, BTRFS_SEND_A_XATTR_DATA, data, data_len);
4512 ret = send_cmd(sctx);
4514 tlv_put_failure:
4515 out:
4516 return ret;
4519 static int send_remove_xattr(struct send_ctx *sctx,
4520 struct fs_path *path,
4521 const char *name, int name_len)
4523 int ret = 0;
4525 ret = begin_cmd(sctx, BTRFS_SEND_C_REMOVE_XATTR);
4526 if (ret < 0)
4527 goto out;
4529 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
4530 TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);
4532 ret = send_cmd(sctx);
4534 tlv_put_failure:
4535 out:
4536 return ret;
4539 static int __process_new_xattr(int num, struct btrfs_key *di_key,
4540 const char *name, int name_len,
4541 const char *data, int data_len,
4542 u8 type, void *ctx)
4544 int ret;
4545 struct send_ctx *sctx = ctx;
4546 struct fs_path *p;
4547 struct posix_acl_xattr_header dummy_acl;
4549 /* Capabilities are emitted by finish_inode_if_needed */
4550 if (!strncmp(name, XATTR_NAME_CAPS, name_len))
4551 return 0;
4553 p = fs_path_alloc();
4554 if (!p)
4555 return -ENOMEM;
4558  * This hack is needed because empty acls are stored as zero-byte
4559  * data in xattrs. The problem with that is that receiving these zero-byte
4560 * acls will fail later. To fix this, we send a dummy acl list that
4561 * only contains the version number and no entries.
4563 if (!strncmp(name, XATTR_NAME_POSIX_ACL_ACCESS, name_len) ||
4564 !strncmp(name, XATTR_NAME_POSIX_ACL_DEFAULT, name_len)) {
4565 if (data_len == 0) {
4566 dummy_acl.a_version =
4567 cpu_to_le32(POSIX_ACL_XATTR_VERSION);
4568 data = (char *)&dummy_acl;
4569 data_len = sizeof(dummy_acl);
4573 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4574 if (ret < 0)
4575 goto out;
4577 ret = send_set_xattr(sctx, p, name, name_len, data, data_len);
4579 out:
4580 fs_path_free(p);
4581 return ret;
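/*
 * The payload substituted above for an empty acl is nothing more than the
 * 4 byte posix_acl_xattr_header. A hypothetical little-endian check of the
 * bytes that end up in the stream (POSIX_ACL_XATTR_VERSION is 0x0002):
 *
 *	#include <assert.h>
 *	#include <stdint.h>
 *	#include <string.h>
 *
 *	int main(void)
 *	{
 *		uint32_t version = 2;	// cpu_to_le32(2) on a LE host
 *		unsigned char payload[sizeof(version)];
 *
 *		memcpy(payload, &version, sizeof(version));
 *		assert(payload[0] == 0x02 && payload[1] == 0x00 &&
 *		       payload[2] == 0x00 && payload[3] == 0x00);
 *		return 0;
 *	}
 */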
4584 static int __process_deleted_xattr(int num, struct btrfs_key *di_key,
4585 const char *name, int name_len,
4586 const char *data, int data_len,
4587 u8 type, void *ctx)
4589 int ret;
4590 struct send_ctx *sctx = ctx;
4591 struct fs_path *p;
4593 p = fs_path_alloc();
4594 if (!p)
4595 return -ENOMEM;
4597 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4598 if (ret < 0)
4599 goto out;
4601 ret = send_remove_xattr(sctx, p, name, name_len);
4603 out:
4604 fs_path_free(p);
4605 return ret;
4608 static int process_new_xattr(struct send_ctx *sctx)
4610 int ret = 0;
4612 ret = iterate_dir_item(sctx->send_root, sctx->left_path,
4613 __process_new_xattr, sctx);
4615 return ret;
4618 static int process_deleted_xattr(struct send_ctx *sctx)
4620 return iterate_dir_item(sctx->parent_root, sctx->right_path,
4621 __process_deleted_xattr, sctx);
4624 struct find_xattr_ctx {
4625 const char *name;
4626 int name_len;
4627 int found_idx;
4628 char *found_data;
4629 int found_data_len;
4632 static int __find_xattr(int num, struct btrfs_key *di_key,
4633 const char *name, int name_len,
4634 const char *data, int data_len,
4635 u8 type, void *vctx)
4637 struct find_xattr_ctx *ctx = vctx;
4639 if (name_len == ctx->name_len &&
4640 strncmp(name, ctx->name, name_len) == 0) {
4641 ctx->found_idx = num;
4642 ctx->found_data_len = data_len;
4643 ctx->found_data = kmemdup(data, data_len, GFP_KERNEL);
4644 if (!ctx->found_data)
4645 return -ENOMEM;
4646 return 1;
4648 return 0;
4651 static int find_xattr(struct btrfs_root *root,
4652 struct btrfs_path *path,
4653 struct btrfs_key *key,
4654 const char *name, int name_len,
4655 char **data, int *data_len)
4657 int ret;
4658 struct find_xattr_ctx ctx;
4660 ctx.name = name;
4661 ctx.name_len = name_len;
4662 ctx.found_idx = -1;
4663 ctx.found_data = NULL;
4664 ctx.found_data_len = 0;
4666 ret = iterate_dir_item(root, path, __find_xattr, &ctx);
4667 if (ret < 0)
4668 return ret;
4670 if (ctx.found_idx == -1)
4671 return -ENOENT;
4672 if (data) {
4673 *data = ctx.found_data;
4674 *data_len = ctx.found_data_len;
4675 } else {
4676 kfree(ctx.found_data);
4678 return ctx.found_idx;
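/*
 * The pattern above is how the dir item iterators in this file signal
 * completion: a callback return < 0 aborts the walk with an error, a
 * positive return stops it early without one, and find_xattr() then turns
 * "never stopped" into -ENOENT. A reduced sketch of that contract, with a
 * plain array standing in for the leaf items:
 *
 *	typedef int (*item_cb)(int num, const char *name, void *ctx);
 *
 *	static int iterate(const char **names, int n, item_cb cb, void *ctx)
 *	{
 *		int i, ret;
 *
 *		for (i = 0; i < n; i++) {
 *			ret = cb(i, names[i], ctx);
 *			if (ret < 0)
 *				return ret;	// error, abort the walk
 *			if (ret > 0)
 *				return 0;	// callback asked to stop
 *		}
 *		return 0;
 *	}
 */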
4682 static int __process_changed_new_xattr(int num, struct btrfs_key *di_key,
4683 const char *name, int name_len,
4684 const char *data, int data_len,
4685 u8 type, void *ctx)
4687 int ret;
4688 struct send_ctx *sctx = ctx;
4689 char *found_data = NULL;
4690 int found_data_len = 0;
4692 ret = find_xattr(sctx->parent_root, sctx->right_path,
4693 sctx->cmp_key, name, name_len, &found_data,
4694 &found_data_len);
4695 if (ret == -ENOENT) {
4696 ret = __process_new_xattr(num, di_key, name, name_len, data,
4697 data_len, type, ctx);
4698 } else if (ret >= 0) {
4699 if (data_len != found_data_len ||
4700 memcmp(data, found_data, data_len)) {
4701 ret = __process_new_xattr(num, di_key, name, name_len,
4702 data, data_len, type, ctx);
4703 } else {
4704 ret = 0;
4708 kfree(found_data);
4709 return ret;
4712 static int __process_changed_deleted_xattr(int num, struct btrfs_key *di_key,
4713 const char *name, int name_len,
4714 const char *data, int data_len,
4715 u8 type, void *ctx)
4717 int ret;
4718 struct send_ctx *sctx = ctx;
4720 ret = find_xattr(sctx->send_root, sctx->left_path, sctx->cmp_key,
4721 name, name_len, NULL, NULL);
4722 if (ret == -ENOENT)
4723 ret = __process_deleted_xattr(num, di_key, name, name_len, data,
4724 data_len, type, ctx);
4725 else if (ret >= 0)
4726 ret = 0;
4728 return ret;
4731 static int process_changed_xattr(struct send_ctx *sctx)
4733 int ret = 0;
4735 ret = iterate_dir_item(sctx->send_root, sctx->left_path,
4736 __process_changed_new_xattr, sctx);
4737 if (ret < 0)
4738 goto out;
4739 ret = iterate_dir_item(sctx->parent_root, sctx->right_path,
4740 __process_changed_deleted_xattr, sctx);
4742 out:
4743 return ret;
4746 static int process_all_new_xattrs(struct send_ctx *sctx)
4748 int ret;
4749 struct btrfs_root *root;
4750 struct btrfs_path *path;
4751 struct btrfs_key key;
4752 struct btrfs_key found_key;
4753 struct extent_buffer *eb;
4754 int slot;
4756 path = alloc_path_for_send();
4757 if (!path)
4758 return -ENOMEM;
4760 root = sctx->send_root;
4762 key.objectid = sctx->cmp_key->objectid;
4763 key.type = BTRFS_XATTR_ITEM_KEY;
4764 key.offset = 0;
4765 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4766 if (ret < 0)
4767 goto out;
4769 while (1) {
4770 eb = path->nodes[0];
4771 slot = path->slots[0];
4772 if (slot >= btrfs_header_nritems(eb)) {
4773 ret = btrfs_next_leaf(root, path);
4774 if (ret < 0) {
4775 goto out;
4776 } else if (ret > 0) {
4777 ret = 0;
4778 break;
4780 continue;
4783 btrfs_item_key_to_cpu(eb, &found_key, slot);
4784 if (found_key.objectid != key.objectid ||
4785 found_key.type != key.type) {
4786 ret = 0;
4787 goto out;
4790 ret = iterate_dir_item(root, path, __process_new_xattr, sctx);
4791 if (ret < 0)
4792 goto out;
4794 path->slots[0]++;
4797 out:
4798 btrfs_free_path(path);
4799 return ret;
4802 static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len)
4804 struct btrfs_root *root = sctx->send_root;
4805 struct btrfs_fs_info *fs_info = root->fs_info;
4806 struct inode *inode;
4807 struct page *page;
4808 char *addr;
4809 pgoff_t index = offset >> PAGE_SHIFT;
4810 pgoff_t last_index;
4811 unsigned pg_offset = offset_in_page(offset);
4812 ssize_t ret = 0;
4814 inode = btrfs_iget(fs_info->sb, sctx->cur_ino, root);
4815 if (IS_ERR(inode))
4816 return PTR_ERR(inode);
4818 if (offset + len > i_size_read(inode)) {
4819 if (offset > i_size_read(inode))
4820 len = 0;
4821 else
4822 len = i_size_read(inode) - offset;
4824 if (len == 0)
4825 goto out;
4827 last_index = (offset + len - 1) >> PAGE_SHIFT;
4829 /* initial readahead */
4830 memset(&sctx->ra, 0, sizeof(struct file_ra_state));
4831 file_ra_state_init(&sctx->ra, inode->i_mapping);
4833 while (index <= last_index) {
4834 unsigned cur_len = min_t(unsigned, len,
4835 PAGE_SIZE - pg_offset);
4837 page = find_lock_page(inode->i_mapping, index);
4838 if (!page) {
4839 page_cache_sync_readahead(inode->i_mapping, &sctx->ra,
4840 NULL, index, last_index + 1 - index);
4842 page = find_or_create_page(inode->i_mapping, index,
4843 GFP_KERNEL);
4844 if (!page) {
4845 ret = -ENOMEM;
4846 break;
4850 if (PageReadahead(page)) {
4851 page_cache_async_readahead(inode->i_mapping, &sctx->ra,
4852 NULL, page, index, last_index + 1 - index);
4855 if (!PageUptodate(page)) {
4856 btrfs_readpage(NULL, page);
4857 lock_page(page);
4858 if (!PageUptodate(page)) {
4859 unlock_page(page);
4860 put_page(page);
4861 ret = -EIO;
4862 break;
4866 addr = kmap(page);
4867 memcpy(sctx->read_buf + ret, addr + pg_offset, cur_len);
4868 kunmap(page);
4869 unlock_page(page);
4870 put_page(page);
4871 index++;
4872 pg_offset = 0;
4873 len -= cur_len;
4874 ret += cur_len;
4876 out:
4877 iput(inode);
4878 return ret;
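/*
 * The loop above walks the page cache one page at a time: only the first
 * page may be copied from a non zero offset and only the last one may be
 * partial. A standalone sketch of the same arithmetic, assuming a 4K page
 * size:
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	#define SKETCH_PAGE_SIZE 4096ULL
 *
 *	int main(void)
 *	{
 *		uint64_t offset = 4000, len = 9000;
 *		uint64_t index = offset / SKETCH_PAGE_SIZE;
 *		uint64_t last = (offset + len - 1) / SKETCH_PAGE_SIZE;
 *		uint64_t pg_off = offset % SKETCH_PAGE_SIZE;
 *
 *		while (index <= last) {
 *			uint64_t cur = SKETCH_PAGE_SIZE - pg_off;
 *
 *			if (cur > len)
 *				cur = len;
 *			// copies 96, 4096, 4096 and 712 bytes
 *			printf("page %llu: %llu bytes from offset %llu\n",
 *			       (unsigned long long)index,
 *			       (unsigned long long)cur,
 *			       (unsigned long long)pg_off);
 *			index++;
 *			pg_off = 0;
 *			len -= cur;
 *		}
 *		return 0;
 *	}
 */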
4881 /*
4882 * Read some bytes from the current inode/file and send a write command to
4883 * user space.
4884 */
4885 static int send_write(struct send_ctx *sctx, u64 offset, u32 len)
4887 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
4888 int ret = 0;
4889 struct fs_path *p;
4890 ssize_t num_read = 0;
4892 p = fs_path_alloc();
4893 if (!p)
4894 return -ENOMEM;
4896 btrfs_debug(fs_info, "send_write offset=%llu, len=%d", offset, len);
4898 num_read = fill_read_buf(sctx, offset, len);
4899 if (num_read <= 0) {
4900 if (num_read < 0)
4901 ret = num_read;
4902 goto out;
4905 ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
4906 if (ret < 0)
4907 goto out;
4909 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4910 if (ret < 0)
4911 goto out;
4913 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
4914 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
4915 TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, num_read);
4917 ret = send_cmd(sctx);
4919 tlv_put_failure:
4920 out:
4921 fs_path_free(p);
4922 if (ret < 0)
4923 return ret;
4924 return num_read;
4927 /*
4928 * Send a clone command to user space.
4929 */
4930 static int send_clone(struct send_ctx *sctx,
4931 u64 offset, u32 len,
4932 struct clone_root *clone_root)
4934 int ret = 0;
4935 struct fs_path *p;
4936 u64 gen;
4938 btrfs_debug(sctx->send_root->fs_info,
4939 "send_clone offset=%llu, len=%d, clone_root=%llu, clone_inode=%llu, clone_offset=%llu",
4940 offset, len, clone_root->root->root_key.objectid,
4941 clone_root->ino, clone_root->offset);
4943 p = fs_path_alloc();
4944 if (!p)
4945 return -ENOMEM;
4947 ret = begin_cmd(sctx, BTRFS_SEND_C_CLONE);
4948 if (ret < 0)
4949 goto out;
4951 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4952 if (ret < 0)
4953 goto out;
4955 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
4956 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_LEN, len);
4957 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
4959 if (clone_root->root == sctx->send_root) {
4960 ret = get_inode_info(sctx->send_root, clone_root->ino, NULL,
4961 &gen, NULL, NULL, NULL, NULL);
4962 if (ret < 0)
4963 goto out;
4964 ret = get_cur_path(sctx, clone_root->ino, gen, p);
4965 } else {
4966 ret = get_inode_path(clone_root->root, clone_root->ino, p);
4968 if (ret < 0)
4969 goto out;
4971 /*
4972 * If the parent we're using has a received_uuid set then use that as
4973 * our clone source as that is what we will look for when doing a
4974 * receive.
4975 *
4976 * This covers the case that we create a snapshot off of a received
4977 * subvolume and then use that as the parent and try to receive on a
4978 * different host.
4979 */
4980 if (!btrfs_is_empty_uuid(clone_root->root->root_item.received_uuid))
4981 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
4982 clone_root->root->root_item.received_uuid);
4983 else
4984 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
4985 clone_root->root->root_item.uuid);
4986 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
4987 le64_to_cpu(clone_root->root->root_item.ctransid));
4988 TLV_PUT_PATH(sctx, BTRFS_SEND_A_CLONE_PATH, p);
4989 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_OFFSET,
4990 clone_root->offset);
4992 ret = send_cmd(sctx);
4994 tlv_put_failure:
4995 out:
4996 fs_path_free(p);
4997 return ret;
5000 /*
5001 * Send an update extent command to user space.
5002 */
5003 static int send_update_extent(struct send_ctx *sctx,
5004 u64 offset, u32 len)
5006 int ret = 0;
5007 struct fs_path *p;
5009 p = fs_path_alloc();
5010 if (!p)
5011 return -ENOMEM;
5013 ret = begin_cmd(sctx, BTRFS_SEND_C_UPDATE_EXTENT);
5014 if (ret < 0)
5015 goto out;
5017 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
5018 if (ret < 0)
5019 goto out;
5021 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
5022 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
5023 TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, len);
5025 ret = send_cmd(sctx);
5027 tlv_put_failure:
5028 out:
5029 fs_path_free(p);
5030 return ret;
5033 static int send_hole(struct send_ctx *sctx, u64 end)
5035 struct fs_path *p = NULL;
5036 u64 offset = sctx->cur_inode_last_extent;
5037 u64 len;
5038 int ret = 0;
5040 /*
5041 * A hole that starts at EOF or beyond it. Since we do not yet support
5042 * fallocate (for extent preallocation and hole punching), sending a
5043 * write of zeroes starting at EOF or beyond would later require issuing
5044 * a truncate operation which would undo the write and achieve nothing.
5045 */
5046 if (offset >= sctx->cur_inode_size)
5047 return 0;
5049 /*
5050 * Don't go beyond the inode's i_size due to prealloc extents that start
5051 * after the i_size.
5052 */
5053 end = min_t(u64, end, sctx->cur_inode_size);
5055 if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA)
5056 return send_update_extent(sctx, offset, end - offset);
5058 p = fs_path_alloc();
5059 if (!p)
5060 return -ENOMEM;
5061 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
5062 if (ret < 0)
5063 goto tlv_put_failure;
5064 memset(sctx->read_buf, 0, BTRFS_SEND_READ_SIZE);
5065 while (offset < end) {
5066 len = min_t(u64, end - offset, BTRFS_SEND_READ_SIZE);
5068 ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
5069 if (ret < 0)
5070 break;
5071 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
5072 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
5073 TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, len);
5074 ret = send_cmd(sctx);
5075 if (ret < 0)
5076 break;
5077 offset += len;
5079 sctx->cur_inode_next_write_offset = offset;
5080 tlv_put_failure:
5081 fs_path_free(p);
5082 return ret;
5085 static int send_extent_data(struct send_ctx *sctx,
5086 const u64 offset,
5087 const u64 len)
5089 u64 sent = 0;
5091 if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA)
5092 return send_update_extent(sctx, offset, len);
5094 while (sent < len) {
5095 u64 size = len - sent;
5096 int ret;
5098 if (size > BTRFS_SEND_READ_SIZE)
5099 size = BTRFS_SEND_READ_SIZE;
5100 ret = send_write(sctx, offset + sent, size);
5101 if (ret < 0)
5102 return ret;
5103 if (!ret)
5104 break;
5105 sent += ret;
5107 return 0;
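/*
 * send_extent_data() therefore never emits a WRITE command larger than
 * BTRFS_SEND_READ_SIZE (48K in send.h at this point in time). A short
 * sketch of how a range degenerates into commands, assuming that constant:
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	#define CHUNK (48 * 1024ULL)	// assumed BTRFS_SEND_READ_SIZE
 *
 *	int main(void)
 *	{
 *		uint64_t len = 100 * 1024, sent = 0;
 *
 *		while (sent < len) {
 *			uint64_t size = len - sent;
 *
 *			if (size > CHUNK)
 *				size = CHUNK;
 *			// prints 49152, 49152 and 4096
 *			printf("WRITE of %llu bytes\n",
 *			       (unsigned long long)size);
 *			sent += size;
 *		}
 *		return 0;
 *	}
 */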
5110 /*
5111 * Search for a capability xattr related to sctx->cur_ino. If the capability is
5112 * found, call the send_set_xattr() function to emit it.
5113 *
5114 * Return 0 if there isn't a capability, or when the capability was emitted
5115 * successfully, or < 0 if an error occurred.
5116 */
5117 static int send_capabilities(struct send_ctx *sctx)
5119 struct fs_path *fspath = NULL;
5120 struct btrfs_path *path;
5121 struct btrfs_dir_item *di;
5122 struct extent_buffer *leaf;
5123 unsigned long data_ptr;
5124 char *buf = NULL;
5125 int buf_len;
5126 int ret = 0;
5128 path = alloc_path_for_send();
5129 if (!path)
5130 return -ENOMEM;
5132 di = btrfs_lookup_xattr(NULL, sctx->send_root, path, sctx->cur_ino,
5133 XATTR_NAME_CAPS, strlen(XATTR_NAME_CAPS), 0);
5134 if (!di) {
5135 /* There is no xattr for this inode */
5136 goto out;
5137 } else if (IS_ERR(di)) {
5138 ret = PTR_ERR(di);
5139 goto out;
5142 leaf = path->nodes[0];
5143 buf_len = btrfs_dir_data_len(leaf, di);
5145 fspath = fs_path_alloc();
5146 buf = kmalloc(buf_len, GFP_KERNEL);
5147 if (!fspath || !buf) {
5148 ret = -ENOMEM;
5149 goto out;
5152 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, fspath);
5153 if (ret < 0)
5154 goto out;
5156 data_ptr = (unsigned long)(di + 1) + btrfs_dir_name_len(leaf, di);
5157 read_extent_buffer(leaf, buf, data_ptr, buf_len);
5159 ret = send_set_xattr(sctx, fspath, XATTR_NAME_CAPS,
5160 strlen(XATTR_NAME_CAPS), buf, buf_len);
5161 out:
5162 kfree(buf);
5163 fs_path_free(fspath);
5164 btrfs_free_path(path);
5165 return ret;
5168 static int clone_range(struct send_ctx *sctx,
5169 struct clone_root *clone_root,
5170 const u64 disk_byte,
5171 u64 data_offset,
5172 u64 offset,
5173 u64 len)
5175 struct btrfs_path *path;
5176 struct btrfs_key key;
5177 int ret;
5178 u64 clone_src_i_size = 0;
5180 /*
5181 * Prevent cloning from a zero offset with a length matching the sector
5182 * size because in some scenarios this will make the receiver fail.
5183 *
5184 * For example, if in the source filesystem the extent at offset 0
5185 * has a length of sectorsize and it was written using direct IO, then
5186 * it can never be an inline extent (even if compression is enabled).
5187 * Then this extent can be cloned in the original filesystem to a
5188 * non-zero file offset, but it may not be possible to clone in the
5189 * destination filesystem because it can be inlined due to compression
5190 * on the destination filesystem (as the receiver's write operations are
5191 * always done using buffered IO). The same happens when the original
5192 * filesystem does not have compression enabled but the destination
5193 * filesystem has.
5194 */
5195 if (clone_root->offset == 0 &&
5196 len == sctx->send_root->fs_info->sectorsize)
5197 return send_extent_data(sctx, offset, len);
5199 path = alloc_path_for_send();
5200 if (!path)
5201 return -ENOMEM;
5203 /*
5204 * There are inodes that have extents that lie beyond their i_size. Don't
5205 * accept clones from these extents.
5206 */
5207 ret = __get_inode_info(clone_root->root, path, clone_root->ino,
5208 &clone_src_i_size, NULL, NULL, NULL, NULL, NULL);
5209 btrfs_release_path(path);
5210 if (ret < 0)
5211 goto out;
5213 /*
5214 * We can't send a clone operation for the entire range if we find
5215 * extent items in the respective range in the source file that
5216 * refer to different extents or if we find holes.
5217 * So check for that and do a mix of clone and regular write/copy
5218 * operations if needed.
5219 *
5220 * Example:
5221 *
5222 * mkfs.btrfs -f /dev/sda
5223 * mount /dev/sda /mnt
5224 * xfs_io -f -c "pwrite -S 0xaa 0K 100K" /mnt/foo
5225 * cp --reflink=always /mnt/foo /mnt/bar
5226 * xfs_io -c "pwrite -S 0xbb 50K 50K" /mnt/foo
5227 * btrfs subvolume snapshot -r /mnt /mnt/snap
5228 *
5229 * If, when we send the snapshot, we are processing file bar (which
5230 * has a higher inode number than foo) and we blindly send a clone operation
5231 * for the [0, 100K[ range from foo to bar, the receiver ends up getting
5232 * a file bar that matches the content of file foo - iow, doesn't match
5233 * the content from bar in the original filesystem.
5234 */
5235 key.objectid = clone_root->ino;
5236 key.type = BTRFS_EXTENT_DATA_KEY;
5237 key.offset = clone_root->offset;
5238 ret = btrfs_search_slot(NULL, clone_root->root, &key, path, 0, 0);
5239 if (ret < 0)
5240 goto out;
5241 if (ret > 0 && path->slots[0] > 0) {
5242 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1);
5243 if (key.objectid == clone_root->ino &&
5244 key.type == BTRFS_EXTENT_DATA_KEY)
5245 path->slots[0]--;
5248 while (true) {
5249 struct extent_buffer *leaf = path->nodes[0];
5250 int slot = path->slots[0];
5251 struct btrfs_file_extent_item *ei;
5252 u8 type;
5253 u64 ext_len;
5254 u64 clone_len;
5255 u64 clone_data_offset;
5257 if (slot >= btrfs_header_nritems(leaf)) {
5258 ret = btrfs_next_leaf(clone_root->root, path);
5259 if (ret < 0)
5260 goto out;
5261 else if (ret > 0)
5262 break;
5263 continue;
5266 btrfs_item_key_to_cpu(leaf, &key, slot);
5268 /*
5269 * We might have an implicit trailing hole (NO_HOLES feature
5270 * enabled). We deal with it after leaving this loop.
5271 */
5272 if (key.objectid != clone_root->ino ||
5273 key.type != BTRFS_EXTENT_DATA_KEY)
5274 break;
5276 ei = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
5277 type = btrfs_file_extent_type(leaf, ei);
5278 if (type == BTRFS_FILE_EXTENT_INLINE) {
5279 ext_len = btrfs_file_extent_ram_bytes(leaf, ei);
5280 ext_len = PAGE_ALIGN(ext_len);
5281 } else {
5282 ext_len = btrfs_file_extent_num_bytes(leaf, ei);
5285 if (key.offset + ext_len <= clone_root->offset)
5286 goto next;
5288 if (key.offset > clone_root->offset) {
5289 /* Implicit hole, NO_HOLES feature enabled. */
5290 u64 hole_len = key.offset - clone_root->offset;
5292 if (hole_len > len)
5293 hole_len = len;
5294 ret = send_extent_data(sctx, offset, hole_len);
5295 if (ret < 0)
5296 goto out;
5298 len -= hole_len;
5299 if (len == 0)
5300 break;
5301 offset += hole_len;
5302 clone_root->offset += hole_len;
5303 data_offset += hole_len;
5306 if (key.offset >= clone_root->offset + len)
5307 break;
5309 if (key.offset >= clone_src_i_size)
5310 break;
5312 if (key.offset + ext_len > clone_src_i_size)
5313 ext_len = clone_src_i_size - key.offset;
5315 clone_data_offset = btrfs_file_extent_offset(leaf, ei);
5316 if (btrfs_file_extent_disk_bytenr(leaf, ei) == disk_byte) {
5317 clone_root->offset = key.offset;
5318 if (clone_data_offset < data_offset &&
5319 clone_data_offset + ext_len > data_offset) {
5320 u64 extent_offset;
5322 extent_offset = data_offset - clone_data_offset;
5323 ext_len -= extent_offset;
5324 clone_data_offset += extent_offset;
5325 clone_root->offset += extent_offset;
5329 clone_len = min_t(u64, ext_len, len);
5331 if (btrfs_file_extent_disk_bytenr(leaf, ei) == disk_byte &&
5332 clone_data_offset == data_offset) {
5333 const u64 src_end = clone_root->offset + clone_len;
5334 const u64 sectorsize = SZ_64K;
5336 /*
5337 * We can't clone the last block, when its size is not
5338 * sector size aligned, into the middle of a file. If we
5339 * do so, the receiver will get a failure (-EINVAL) when
5340 * trying to clone or will silently corrupt the data in
5341 * the destination file if it's on a kernel without the
5342 * fix introduced by commit ac765f83f1397646
5343 * ("Btrfs: fix data corruption due to cloning of eof
5344 * block").
5345 *
5346 * So issue a clone of the aligned down range plus a
5347 * regular write for the eof block, if we hit that case.
5348 *
5349 * Also, we use the maximum possible sector size, 64K,
5350 * because we don't know the sector size of the
5351 * filesystem that receives the stream, so we have to
5352 * assume the largest possible sector size.
5353 */
5354 if (src_end == clone_src_i_size &&
5355 !IS_ALIGNED(src_end, sectorsize) &&
5356 offset + clone_len < sctx->cur_inode_size) {
5357 u64 slen;
5359 slen = ALIGN_DOWN(src_end - clone_root->offset,
5360 sectorsize);
5361 if (slen > 0) {
5362 ret = send_clone(sctx, offset, slen,
5363 clone_root);
5364 if (ret < 0)
5365 goto out;
5367 ret = send_extent_data(sctx, offset + slen,
5368 clone_len - slen);
5369 } else {
5370 ret = send_clone(sctx, offset, clone_len,
5371 clone_root);
5373 } else {
5374 ret = send_extent_data(sctx, offset, clone_len);
5377 if (ret < 0)
5378 goto out;
5380 len -= clone_len;
5381 if (len == 0)
5382 break;
5383 offset += clone_len;
5384 clone_root->offset += clone_len;
5385 data_offset += clone_len;
5386 next:
5387 path->slots[0]++;
5390 if (len > 0)
5391 ret = send_extent_data(sctx, offset, len);
5392 else
5393 ret = 0;
5394 out:
5395 btrfs_free_path(path);
5396 return ret;
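/*
 * The eof block handling above boils down to one ALIGN_DOWN(): clone as
 * much of the range as is 64K aligned and write the tail. A worked example
 * with hypothetical numbers (i_size = 136072, clone source offset 0):
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	#define ALIGN_DOWN(x, a) ((x) - ((x) % (a)))
 *
 *	int main(void)
 *	{
 *		uint64_t clone_offset = 0;
 *		uint64_t src_end = 136072;	// i_size, not 64K aligned
 *		uint64_t slen = ALIGN_DOWN(src_end - clone_offset, 65536);
 *
 *		// prints: clone [0, 131072), write [131072, 136072)
 *		printf("clone [0, %llu), write [%llu, %llu)\n",
 *		       (unsigned long long)slen,
 *		       (unsigned long long)slen,
 *		       (unsigned long long)src_end);
 *		return 0;
 *	}
 */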
5399 static int send_write_or_clone(struct send_ctx *sctx,
5400 struct btrfs_path *path,
5401 struct btrfs_key *key,
5402 struct clone_root *clone_root)
5404 int ret = 0;
5405 struct btrfs_file_extent_item *ei;
5406 u64 offset = key->offset;
5407 u64 len;
5408 u8 type;
5409 u64 bs = sctx->send_root->fs_info->sb->s_blocksize;
5411 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
5412 struct btrfs_file_extent_item);
5413 type = btrfs_file_extent_type(path->nodes[0], ei);
5414 if (type == BTRFS_FILE_EXTENT_INLINE) {
5415 len = btrfs_file_extent_ram_bytes(path->nodes[0], ei);
5416 /*
5417 * It is possible the inline item won't cover the whole page,
5418 * but there may be items after this page. Make sure to send
5419 * the whole thing.
5420 */
5421 len = PAGE_ALIGN(len);
5422 } else {
5423 len = btrfs_file_extent_num_bytes(path->nodes[0], ei);
5426 if (offset >= sctx->cur_inode_size) {
5427 ret = 0;
5428 goto out;
5430 if (offset + len > sctx->cur_inode_size)
5431 len = sctx->cur_inode_size - offset;
5432 if (len == 0) {
5433 ret = 0;
5434 goto out;
5437 if (clone_root && IS_ALIGNED(offset + len, bs)) {
5438 u64 disk_byte;
5439 u64 data_offset;
5441 disk_byte = btrfs_file_extent_disk_bytenr(path->nodes[0], ei);
5442 data_offset = btrfs_file_extent_offset(path->nodes[0], ei);
5443 ret = clone_range(sctx, clone_root, disk_byte, data_offset,
5444 offset, len);
5445 } else {
5446 ret = send_extent_data(sctx, offset, len);
5448 sctx->cur_inode_next_write_offset = offset + len;
5449 out:
5450 return ret;
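/*
 * The clone path above is only taken when the end of the range is aligned
 * to the filesystem block size, since cloning misaligned ranges that do not
 * end at EOF is generally rejected on the receiving side. A sketch of that
 * gate, assuming a 4K block size:
 *
 *	#include <stdio.h>
 *
 *	#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)
 *
 *	int main(void)
 *	{
 *		unsigned long long bs = 4096;
 *
 *		printf("%d %d\n",
 *		       IS_ALIGNED(102400ULL, bs),	// 1: may clone
 *		       IS_ALIGNED(102500ULL, bs));	// 0: plain write
 *		return 0;
 *	}
 */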
5453 static int is_extent_unchanged(struct send_ctx *sctx,
5454 struct btrfs_path *left_path,
5455 struct btrfs_key *ekey)
5457 int ret = 0;
5458 struct btrfs_key key;
5459 struct btrfs_path *path = NULL;
5460 struct extent_buffer *eb;
5461 int slot;
5462 struct btrfs_key found_key;
5463 struct btrfs_file_extent_item *ei;
5464 u64 left_disknr;
5465 u64 right_disknr;
5466 u64 left_offset;
5467 u64 right_offset;
5468 u64 left_offset_fixed;
5469 u64 left_len;
5470 u64 right_len;
5471 u64 left_gen;
5472 u64 right_gen;
5473 u8 left_type;
5474 u8 right_type;
5476 path = alloc_path_for_send();
5477 if (!path)
5478 return -ENOMEM;
5480 eb = left_path->nodes[0];
5481 slot = left_path->slots[0];
5482 ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
5483 left_type = btrfs_file_extent_type(eb, ei);
5485 if (left_type != BTRFS_FILE_EXTENT_REG) {
5486 ret = 0;
5487 goto out;
5489 left_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
5490 left_len = btrfs_file_extent_num_bytes(eb, ei);
5491 left_offset = btrfs_file_extent_offset(eb, ei);
5492 left_gen = btrfs_file_extent_generation(eb, ei);
5494 /*
5495 * Following comments will refer to these graphics. L is the left
5496 * extent which we are checking at the moment. 1-8 are the right
5497 * extents that we iterate.
5498 *
5499 * |-----L-----|
5500 * |-1-|-2a-|-3-|-4-|-5-|-6-|
5501 *
5502 * |-----L-----|
5503 * |--1--|-2b-|...(same as above)
5504 *
5505 * Alternative situation. Happens on files where extents got split.
5506 * |-----L-----|
5507 * |-----------7-----------|-6-|
5508 *
5509 * Alternative situation. Happens on files which got larger.
5510 * |-----L-----|
5511 * |-8-|
5512 * Nothing follows after 8.
5513 */
5515 key.objectid = ekey->objectid;
5516 key.type = BTRFS_EXTENT_DATA_KEY;
5517 key.offset = ekey->offset;
5518 ret = btrfs_search_slot_for_read(sctx->parent_root, &key, path, 0, 0);
5519 if (ret < 0)
5520 goto out;
5521 if (ret) {
5522 ret = 0;
5523 goto out;
5526 /*
5527 * Handle special case where the right side has no extents at all.
5528 */
5529 eb = path->nodes[0];
5530 slot = path->slots[0];
5531 btrfs_item_key_to_cpu(eb, &found_key, slot);
5532 if (found_key.objectid != key.objectid ||
5533 found_key.type != key.type) {
5534 /* If we're a hole then just pretend nothing changed */
5535 ret = (left_disknr) ? 0 : 1;
5536 goto out;
5539 /*
5540 * We're now on 2a, 2b or 7.
5541 */
5542 key = found_key;
5543 while (key.offset < ekey->offset + left_len) {
5544 ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
5545 right_type = btrfs_file_extent_type(eb, ei);
5546 if (right_type != BTRFS_FILE_EXTENT_REG &&
5547 right_type != BTRFS_FILE_EXTENT_INLINE) {
5548 ret = 0;
5549 goto out;
5552 if (right_type == BTRFS_FILE_EXTENT_INLINE) {
5553 right_len = btrfs_file_extent_ram_bytes(eb, ei);
5554 right_len = PAGE_ALIGN(right_len);
5555 } else {
5556 right_len = btrfs_file_extent_num_bytes(eb, ei);
5559 /*
5560 * Are we at extent 8? If yes, we know the extent is changed.
5561 * This may only happen on the first iteration.
5562 */
5563 if (found_key.offset + right_len <= ekey->offset) {
5564 /* If we're a hole just pretend nothing changed */
5565 ret = (left_disknr) ? 0 : 1;
5566 goto out;
5569 /*
5570 * We just wanted to see if, when we have an inline extent, what
5571 * follows it is a regular extent (i.e. to check the above
5572 * condition for inline extents too). This should normally not
5573 * happen but it's possible for example when we have an inline
5574 * compressed extent representing data with a size matching
5575 * the page size (currently the same as the sector size).
5576 */
5577 if (right_type == BTRFS_FILE_EXTENT_INLINE) {
5578 ret = 0;
5579 goto out;
5582 right_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
5583 right_offset = btrfs_file_extent_offset(eb, ei);
5584 right_gen = btrfs_file_extent_generation(eb, ei);
5586 left_offset_fixed = left_offset;
5587 if (key.offset < ekey->offset) {
5588 /* Fix the right offset for 2a and 7. */
5589 right_offset += ekey->offset - key.offset;
5590 } else {
5591 /* Fix the left offset for all behind 2a and 2b */
5592 left_offset_fixed += key.offset - ekey->offset;
5595 /*
5596 * Check if we have the same extent.
5597 */
5598 if (left_disknr != right_disknr ||
5599 left_offset_fixed != right_offset ||
5600 left_gen != right_gen) {
5601 ret = 0;
5602 goto out;
5605 /*
5606 * Go to the next extent.
5607 */
5608 ret = btrfs_next_item(sctx->parent_root, path);
5609 if (ret < 0)
5610 goto out;
5611 if (!ret) {
5612 eb = path->nodes[0];
5613 slot = path->slots[0];
5614 btrfs_item_key_to_cpu(eb, &found_key, slot);
5616 if (ret || found_key.objectid != key.objectid ||
5617 found_key.type != key.type) {
5618 key.offset += right_len;
5619 break;
5621 if (found_key.offset != key.offset + right_len) {
5622 ret = 0;
5623 goto out;
5625 key = found_key;
5628 /*
5629 * We're now behind the left extent (treat as unchanged) or at the end
5630 * of the right side (treat as changed).
5631 */
5632 if (key.offset >= ekey->offset + left_len)
5633 ret = 1;
5634 else
5635 ret = 0;
5638 out:
5639 btrfs_free_path(path);
5640 return ret;
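/*
 * The offset fixups above normalize both extents to the same file position
 * before comparing where they point on disk. A reduced check of that rule
 * with made-up numbers, where the parent's extent starts 40 bytes earlier
 * in the file and the send side's file extent item therefore points 40
 * bytes further into the shared disk extent:
 *
 *	#include <assert.h>
 *	#include <stdint.h>
 *
 *	struct ext { uint64_t file_off, disknr, off, gen; };
 *
 *	static int same_data(struct ext l, struct ext r)
 *	{
 *		uint64_t lo = l.off, ro = r.off;
 *
 *		if (r.file_off < l.file_off)
 *			ro += l.file_off - r.file_off;	// cases 2a and 7
 *		else
 *			lo += r.file_off - l.file_off;	// 2b and following
 *		return l.disknr == r.disknr && lo == ro && l.gen == r.gen;
 *	}
 *
 *	int main(void)
 *	{
 *		struct ext left = { 100, 5000, 40, 7 };
 *		struct ext right = { 60, 5000, 0, 7 };
 *
 *		assert(same_data(left, right));
 *		return 0;
 *	}
 */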
5643 static int get_last_extent(struct send_ctx *sctx, u64 offset)
5645 struct btrfs_path *path;
5646 struct btrfs_root *root = sctx->send_root;
5647 struct btrfs_key key;
5648 int ret;
5650 path = alloc_path_for_send();
5651 if (!path)
5652 return -ENOMEM;
5654 sctx->cur_inode_last_extent = 0;
5656 key.objectid = sctx->cur_ino;
5657 key.type = BTRFS_EXTENT_DATA_KEY;
5658 key.offset = offset;
5659 ret = btrfs_search_slot_for_read(root, &key, path, 0, 1);
5660 if (ret < 0)
5661 goto out;
5662 ret = 0;
5663 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
5664 if (key.objectid != sctx->cur_ino || key.type != BTRFS_EXTENT_DATA_KEY)
5665 goto out;
5667 sctx->cur_inode_last_extent = btrfs_file_extent_end(path);
5668 out:
5669 btrfs_free_path(path);
5670 return ret;
5673 static int range_is_hole_in_parent(struct send_ctx *sctx,
5674 const u64 start,
5675 const u64 end)
5677 struct btrfs_path *path;
5678 struct btrfs_key key;
5679 struct btrfs_root *root = sctx->parent_root;
5680 u64 search_start = start;
5681 int ret;
5683 path = alloc_path_for_send();
5684 if (!path)
5685 return -ENOMEM;
5687 key.objectid = sctx->cur_ino;
5688 key.type = BTRFS_EXTENT_DATA_KEY;
5689 key.offset = search_start;
5690 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5691 if (ret < 0)
5692 goto out;
5693 if (ret > 0 && path->slots[0] > 0)
5694 path->slots[0]--;
5696 while (search_start < end) {
5697 struct extent_buffer *leaf = path->nodes[0];
5698 int slot = path->slots[0];
5699 struct btrfs_file_extent_item *fi;
5700 u64 extent_end;
5702 if (slot >= btrfs_header_nritems(leaf)) {
5703 ret = btrfs_next_leaf(root, path);
5704 if (ret < 0)
5705 goto out;
5706 else if (ret > 0)
5707 break;
5708 continue;
5711 btrfs_item_key_to_cpu(leaf, &key, slot);
5712 if (key.objectid < sctx->cur_ino ||
5713 key.type < BTRFS_EXTENT_DATA_KEY)
5714 goto next;
5715 if (key.objectid > sctx->cur_ino ||
5716 key.type > BTRFS_EXTENT_DATA_KEY ||
5717 key.offset >= end)
5718 break;
5720 fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
5721 extent_end = btrfs_file_extent_end(path);
5722 if (extent_end <= start)
5723 goto next;
5724 if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0) {
5725 search_start = extent_end;
5726 goto next;
5728 ret = 0;
5729 goto out;
5730 next:
5731 path->slots[0]++;
5733 ret = 1;
5734 out:
5735 btrfs_free_path(path);
5736 return ret;
5739 static int maybe_send_hole(struct send_ctx *sctx, struct btrfs_path *path,
5740 struct btrfs_key *key)
5742 int ret = 0;
5744 if (sctx->cur_ino != key->objectid || !need_send_hole(sctx))
5745 return 0;
5747 if (sctx->cur_inode_last_extent == (u64)-1) {
5748 ret = get_last_extent(sctx, key->offset - 1);
5749 if (ret)
5750 return ret;
5753 if (path->slots[0] == 0 &&
5754 sctx->cur_inode_last_extent < key->offset) {
5755 /*
5756 * We might have skipped entire leafs that contained only
5757 * file extent items for our current inode. These leafs have
5758 * a generation number smaller (older) than the one in the
5759 * current leaf and the leaf our last extent came from, and
5760 * are located between these 2 leafs.
5761 */
5762 ret = get_last_extent(sctx, key->offset - 1);
5763 if (ret)
5764 return ret;
5767 if (sctx->cur_inode_last_extent < key->offset) {
5768 ret = range_is_hole_in_parent(sctx,
5769 sctx->cur_inode_last_extent,
5770 key->offset);
5771 if (ret < 0)
5772 return ret;
5773 else if (ret == 0)
5774 ret = send_hole(sctx, key->offset);
5775 else
5776 ret = 0;
5778 sctx->cur_inode_last_extent = btrfs_file_extent_end(path);
5779 return ret;
5782 static int process_extent(struct send_ctx *sctx,
5783 struct btrfs_path *path,
5784 struct btrfs_key *key)
5786 struct clone_root *found_clone = NULL;
5787 int ret = 0;
5789 if (S_ISLNK(sctx->cur_inode_mode))
5790 return 0;
5792 if (sctx->parent_root && !sctx->cur_inode_new) {
5793 ret = is_extent_unchanged(sctx, path, key);
5794 if (ret < 0)
5795 goto out;
5796 if (ret) {
5797 ret = 0;
5798 goto out_hole;
5800 } else {
5801 struct btrfs_file_extent_item *ei;
5802 u8 type;
5804 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
5805 struct btrfs_file_extent_item);
5806 type = btrfs_file_extent_type(path->nodes[0], ei);
5807 if (type == BTRFS_FILE_EXTENT_PREALLOC ||
5808 type == BTRFS_FILE_EXTENT_REG) {
5809 /*
5810 * The send spec does not have a prealloc command yet,
5811 * so just leave a hole for prealloc'ed extents until
5812 * we have enough commands queued up to justify rev'ing
5813 * the send spec.
5814 */
5815 if (type == BTRFS_FILE_EXTENT_PREALLOC) {
5816 ret = 0;
5817 goto out;
5820 /* Have a hole, just skip it. */
5821 if (btrfs_file_extent_disk_bytenr(path->nodes[0], ei) == 0) {
5822 ret = 0;
5823 goto out;
5828 ret = find_extent_clone(sctx, path, key->objectid, key->offset,
5829 sctx->cur_inode_size, &found_clone);
5830 if (ret != -ENOENT && ret < 0)
5831 goto out;
5833 ret = send_write_or_clone(sctx, path, key, found_clone);
5834 if (ret)
5835 goto out;
5836 out_hole:
5837 ret = maybe_send_hole(sctx, path, key);
5838 out:
5839 return ret;
5842 static int process_all_extents(struct send_ctx *sctx)
5844 int ret;
5845 struct btrfs_root *root;
5846 struct btrfs_path *path;
5847 struct btrfs_key key;
5848 struct btrfs_key found_key;
5849 struct extent_buffer *eb;
5850 int slot;
5852 root = sctx->send_root;
5853 path = alloc_path_for_send();
5854 if (!path)
5855 return -ENOMEM;
5857 key.objectid = sctx->cmp_key->objectid;
5858 key.type = BTRFS_EXTENT_DATA_KEY;
5859 key.offset = 0;
5860 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5861 if (ret < 0)
5862 goto out;
5864 while (1) {
5865 eb = path->nodes[0];
5866 slot = path->slots[0];
5868 if (slot >= btrfs_header_nritems(eb)) {
5869 ret = btrfs_next_leaf(root, path);
5870 if (ret < 0) {
5871 goto out;
5872 } else if (ret > 0) {
5873 ret = 0;
5874 break;
5876 continue;
5879 btrfs_item_key_to_cpu(eb, &found_key, slot);
5881 if (found_key.objectid != key.objectid ||
5882 found_key.type != key.type) {
5883 ret = 0;
5884 goto out;
5887 ret = process_extent(sctx, path, &found_key);
5888 if (ret < 0)
5889 goto out;
5891 path->slots[0]++;
5894 out:
5895 btrfs_free_path(path);
5896 return ret;
5899 static int process_recorded_refs_if_needed(struct send_ctx *sctx, int at_end,
5900 int *pending_move,
5901 int *refs_processed)
5903 int ret = 0;
5905 if (sctx->cur_ino == 0)
5906 goto out;
5907 if (!at_end && sctx->cur_ino == sctx->cmp_key->objectid &&
5908 sctx->cmp_key->type <= BTRFS_INODE_EXTREF_KEY)
5909 goto out;
5910 if (list_empty(&sctx->new_refs) && list_empty(&sctx->deleted_refs))
5911 goto out;
5913 ret = process_recorded_refs(sctx, pending_move);
5914 if (ret < 0)
5915 goto out;
5917 *refs_processed = 1;
5918 out:
5919 return ret;
5922 static int finish_inode_if_needed(struct send_ctx *sctx, int at_end)
5924 int ret = 0;
5925 u64 left_mode;
5926 u64 left_uid;
5927 u64 left_gid;
5928 u64 right_mode;
5929 u64 right_uid;
5930 u64 right_gid;
5931 int need_chmod = 0;
5932 int need_chown = 0;
5933 int need_truncate = 1;
5934 int pending_move = 0;
5935 int refs_processed = 0;
5937 if (sctx->ignore_cur_inode)
5938 return 0;
5940 ret = process_recorded_refs_if_needed(sctx, at_end, &pending_move,
5941 &refs_processed);
5942 if (ret < 0)
5943 goto out;
5945 /*
5946 * We have processed the refs and thus need to advance send_progress.
5947 * Now, calls to get_cur_xxx will take the updated refs of the current
5948 * inode into account.
5949 *
5950 * On the other hand, if our current inode is a directory and couldn't
5951 * be moved/renamed because its parent was renamed/moved too and it has
5952 * a higher inode number, we can only move/rename our current inode
5953 * after we moved/renamed its parent. Therefore in this case operate on
5954 * the old path (pre move/rename) of our current inode, and the
5955 * move/rename will be performed later.
5956 */
5957 if (refs_processed && !pending_move)
5958 sctx->send_progress = sctx->cur_ino + 1;
5960 if (sctx->cur_ino == 0 || sctx->cur_inode_deleted)
5961 goto out;
5962 if (!at_end && sctx->cmp_key->objectid == sctx->cur_ino)
5963 goto out;
5965 ret = get_inode_info(sctx->send_root, sctx->cur_ino, NULL, NULL,
5966 &left_mode, &left_uid, &left_gid, NULL);
5967 if (ret < 0)
5968 goto out;
5970 if (!sctx->parent_root || sctx->cur_inode_new) {
5971 need_chown = 1;
5972 if (!S_ISLNK(sctx->cur_inode_mode))
5973 need_chmod = 1;
5974 if (sctx->cur_inode_next_write_offset == sctx->cur_inode_size)
5975 need_truncate = 0;
5976 } else {
5977 u64 old_size;
5979 ret = get_inode_info(sctx->parent_root, sctx->cur_ino,
5980 &old_size, NULL, &right_mode, &right_uid,
5981 &right_gid, NULL);
5982 if (ret < 0)
5983 goto out;
5985 if (left_uid != right_uid || left_gid != right_gid)
5986 need_chown = 1;
5987 if (!S_ISLNK(sctx->cur_inode_mode) && left_mode != right_mode)
5988 need_chmod = 1;
5989 if ((old_size == sctx->cur_inode_size) ||
5990 (sctx->cur_inode_size > old_size &&
5991 sctx->cur_inode_next_write_offset == sctx->cur_inode_size))
5992 need_truncate = 0;
5995 if (S_ISREG(sctx->cur_inode_mode)) {
5996 if (need_send_hole(sctx)) {
5997 if (sctx->cur_inode_last_extent == (u64)-1 ||
5998 sctx->cur_inode_last_extent <
5999 sctx->cur_inode_size) {
6000 ret = get_last_extent(sctx, (u64)-1);
6001 if (ret)
6002 goto out;
6004 if (sctx->cur_inode_last_extent <
6005 sctx->cur_inode_size) {
6006 ret = send_hole(sctx, sctx->cur_inode_size);
6007 if (ret)
6008 goto out;
6011 if (need_truncate) {
6012 ret = send_truncate(sctx, sctx->cur_ino,
6013 sctx->cur_inode_gen,
6014 sctx->cur_inode_size);
6015 if (ret < 0)
6016 goto out;
6020 if (need_chown) {
6021 ret = send_chown(sctx, sctx->cur_ino, sctx->cur_inode_gen,
6022 left_uid, left_gid);
6023 if (ret < 0)
6024 goto out;
6026 if (need_chmod) {
6027 ret = send_chmod(sctx, sctx->cur_ino, sctx->cur_inode_gen,
6028 left_mode);
6029 if (ret < 0)
6030 goto out;
6033 ret = send_capabilities(sctx);
6034 if (ret < 0)
6035 goto out;
6037 /*
6038 * If other directory inodes depended on our current directory
6039 * inode's move/rename, now do their move/rename operations.
6040 */
6041 if (!is_waiting_for_move(sctx, sctx->cur_ino)) {
6042 ret = apply_children_dir_moves(sctx);
6043 if (ret)
6044 goto out;
6045 /*
6046 * We need to send the utimes every time, no matter whether anything
6047 * actually changed between the two trees, as we have done changes to
6048 * the inode before. If our inode is a directory and it's
6049 * waiting to be moved/renamed, we will send its utimes when
6050 * it's moved/renamed, therefore we don't need to do it here.
6051 */
6052 sctx->send_progress = sctx->cur_ino + 1;
6053 ret = send_utimes(sctx, sctx->cur_ino, sctx->cur_inode_gen);
6054 if (ret < 0)
6055 goto out;
6058 out:
6059 return ret;
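/*
 * The need_truncate decision above can be read as a small pure function: a
 * truncate is only skipped when the stream already wrote the file out to
 * exactly its final size, or when the size did not change at all. A sketch
 * under those assumptions:
 *
 *	#include <stdint.h>
 *
 *	static int need_truncate(int is_new, uint64_t old_size,
 *				 uint64_t new_size, uint64_t next_write)
 *	{
 *		if (is_new)
 *			return next_write != new_size;
 *		if (old_size == new_size)
 *			return 0;
 *		if (new_size > old_size && next_write == new_size)
 *			return 0;
 *		return 1;
 *	}
 */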
6062 struct parent_paths_ctx {
6063 struct list_head *refs;
6064 struct send_ctx *sctx;
6067 static int record_parent_ref(int num, u64 dir, int index, struct fs_path *name,
6068 void *ctx)
6070 struct parent_paths_ctx *ppctx = ctx;
6072 return record_ref(ppctx->sctx->parent_root, dir, name, ppctx->sctx,
6073 ppctx->refs);
6076 /*
6077 * Issue unlink operations for all paths of the current inode found in the
6078 * parent snapshot.
6079 */
6080 static int btrfs_unlink_all_paths(struct send_ctx *sctx)
6082 LIST_HEAD(deleted_refs);
6083 struct btrfs_path *path;
6084 struct btrfs_key key;
6085 struct parent_paths_ctx ctx;
6086 int ret;
6088 path = alloc_path_for_send();
6089 if (!path)
6090 return -ENOMEM;
6092 key.objectid = sctx->cur_ino;
6093 key.type = BTRFS_INODE_REF_KEY;
6094 key.offset = 0;
6095 ret = btrfs_search_slot(NULL, sctx->parent_root, &key, path, 0, 0);
6096 if (ret < 0)
6097 goto out;
6099 ctx.refs = &deleted_refs;
6100 ctx.sctx = sctx;
6102 while (true) {
6103 struct extent_buffer *eb = path->nodes[0];
6104 int slot = path->slots[0];
6106 if (slot >= btrfs_header_nritems(eb)) {
6107 ret = btrfs_next_leaf(sctx->parent_root, path);
6108 if (ret < 0)
6109 goto out;
6110 else if (ret > 0)
6111 break;
6112 continue;
6115 btrfs_item_key_to_cpu(eb, &key, slot);
6116 if (key.objectid != sctx->cur_ino)
6117 break;
6118 if (key.type != BTRFS_INODE_REF_KEY &&
6119 key.type != BTRFS_INODE_EXTREF_KEY)
6120 break;
6122 ret = iterate_inode_ref(sctx->parent_root, path, &key, 1,
6123 record_parent_ref, &ctx);
6124 if (ret < 0)
6125 goto out;
6127 path->slots[0]++;
6130 while (!list_empty(&deleted_refs)) {
6131 struct recorded_ref *ref;
6133 ref = list_first_entry(&deleted_refs, struct recorded_ref, list);
6134 ret = send_unlink(sctx, ref->full_path);
6135 if (ret < 0)
6136 goto out;
6137 fs_path_free(ref->full_path);
6138 list_del(&ref->list);
6139 kfree(ref);
6141 ret = 0;
6142 out:
6143 btrfs_free_path(path);
6144 if (ret)
6145 __free_recorded_refs(&deleted_refs);
6146 return ret;
6149 static int changed_inode(struct send_ctx *sctx,
6150 enum btrfs_compare_tree_result result)
6152 int ret = 0;
6153 struct btrfs_key *key = sctx->cmp_key;
6154 struct btrfs_inode_item *left_ii = NULL;
6155 struct btrfs_inode_item *right_ii = NULL;
6156 u64 left_gen = 0;
6157 u64 right_gen = 0;
6159 sctx->cur_ino = key->objectid;
6160 sctx->cur_inode_new_gen = 0;
6161 sctx->cur_inode_last_extent = (u64)-1;
6162 sctx->cur_inode_next_write_offset = 0;
6163 sctx->ignore_cur_inode = false;
6165 /*
6166 * Set send_progress to current inode. This will tell all get_cur_xxx
6167 * functions that the current inode's refs are not updated yet. Later,
6168 * when process_recorded_refs is finished, it is set to cur_ino + 1.
6169 */
6170 sctx->send_progress = sctx->cur_ino;
6172 if (result == BTRFS_COMPARE_TREE_NEW ||
6173 result == BTRFS_COMPARE_TREE_CHANGED) {
6174 left_ii = btrfs_item_ptr(sctx->left_path->nodes[0],
6175 sctx->left_path->slots[0],
6176 struct btrfs_inode_item);
6177 left_gen = btrfs_inode_generation(sctx->left_path->nodes[0],
6178 left_ii);
6179 } else {
6180 right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
6181 sctx->right_path->slots[0],
6182 struct btrfs_inode_item);
6183 right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
6184 right_ii);
6186 if (result == BTRFS_COMPARE_TREE_CHANGED) {
6187 right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
6188 sctx->right_path->slots[0],
6189 struct btrfs_inode_item);
6191 right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
6192 right_ii);
6194 /*
6195 * The cur_ino = root dir case is special here. We can't treat
6196 * the inode as deleted+reused because it would generate a
6197 * stream that tries to delete/mkdir the root dir.
6198 */
6199 if (left_gen != right_gen &&
6200 sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
6201 sctx->cur_inode_new_gen = 1;
6204 /*
6205 * Normally we do not find inodes with a link count of zero (orphans)
6206 * because the most common case is to create a snapshot and use it
6207 * for a send operation. However other less common use cases involve
6208 * using a subvolume and sending it after turning it to RO mode just
6209 * after deleting all hard links of a file while holding an open
6210 * file descriptor against it, or turning a RO snapshot into RW mode,
6211 * keeping an open file descriptor against a file, deleting it and then
6212 * turning the snapshot back to RO mode before using it for a send
6213 * operation. So if we find such cases, ignore the inode and all its
6214 * items completely if it's a new inode, or, if it's a changed inode,
6215 * make sure all its previous paths (from the parent snapshot) are
6216 * unlinked and all the other inode items are ignored.
6217 */
6218 if (result == BTRFS_COMPARE_TREE_NEW ||
6219 result == BTRFS_COMPARE_TREE_CHANGED) {
6220 u32 nlinks;
6222 nlinks = btrfs_inode_nlink(sctx->left_path->nodes[0], left_ii);
6223 if (nlinks == 0) {
6224 sctx->ignore_cur_inode = true;
6225 if (result == BTRFS_COMPARE_TREE_CHANGED)
6226 ret = btrfs_unlink_all_paths(sctx);
6227 goto out;
6231 if (result == BTRFS_COMPARE_TREE_NEW) {
6232 sctx->cur_inode_gen = left_gen;
6233 sctx->cur_inode_new = 1;
6234 sctx->cur_inode_deleted = 0;
6235 sctx->cur_inode_size = btrfs_inode_size(
6236 sctx->left_path->nodes[0], left_ii);
6237 sctx->cur_inode_mode = btrfs_inode_mode(
6238 sctx->left_path->nodes[0], left_ii);
6239 sctx->cur_inode_rdev = btrfs_inode_rdev(
6240 sctx->left_path->nodes[0], left_ii);
6241 if (sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
6242 ret = send_create_inode_if_needed(sctx);
6243 } else if (result == BTRFS_COMPARE_TREE_DELETED) {
6244 sctx->cur_inode_gen = right_gen;
6245 sctx->cur_inode_new = 0;
6246 sctx->cur_inode_deleted = 1;
6247 sctx->cur_inode_size = btrfs_inode_size(
6248 sctx->right_path->nodes[0], right_ii);
6249 sctx->cur_inode_mode = btrfs_inode_mode(
6250 sctx->right_path->nodes[0], right_ii);
6251 } else if (result == BTRFS_COMPARE_TREE_CHANGED) {
6252 /*
6253 * We need to do some special handling in case the inode was
6254 * reported as changed with a changed generation number. This
6255 * means that the original inode was deleted and a new inode
6256 * reused the same inum. So we have to treat the old inode as
6257 * deleted and the new one as new.
6258 */
6259 if (sctx->cur_inode_new_gen) {
6260 /*
6261 * First, process the inode as if it was deleted.
6262 */
6263 sctx->cur_inode_gen = right_gen;
6264 sctx->cur_inode_new = 0;
6265 sctx->cur_inode_deleted = 1;
6266 sctx->cur_inode_size = btrfs_inode_size(
6267 sctx->right_path->nodes[0], right_ii);
6268 sctx->cur_inode_mode = btrfs_inode_mode(
6269 sctx->right_path->nodes[0], right_ii);
6270 ret = process_all_refs(sctx,
6271 BTRFS_COMPARE_TREE_DELETED);
6272 if (ret < 0)
6273 goto out;
6275 /*
6276 * Now process the inode as if it was new.
6277 */
6278 sctx->cur_inode_gen = left_gen;
6279 sctx->cur_inode_new = 1;
6280 sctx->cur_inode_deleted = 0;
6281 sctx->cur_inode_size = btrfs_inode_size(
6282 sctx->left_path->nodes[0], left_ii);
6283 sctx->cur_inode_mode = btrfs_inode_mode(
6284 sctx->left_path->nodes[0], left_ii);
6285 sctx->cur_inode_rdev = btrfs_inode_rdev(
6286 sctx->left_path->nodes[0], left_ii);
6287 ret = send_create_inode_if_needed(sctx);
6288 if (ret < 0)
6289 goto out;
6291 ret = process_all_refs(sctx, BTRFS_COMPARE_TREE_NEW);
6292 if (ret < 0)
6293 goto out;
6294 /*
6295 * Advance send_progress now as we did not get into
6296 * process_recorded_refs_if_needed in the new_gen case.
6297 */
6298 sctx->send_progress = sctx->cur_ino + 1;
6300 /*
6301 * Now process all extents and xattrs of the inode as if
6302 * they were all new.
6303 */
6304 ret = process_all_extents(sctx);
6305 if (ret < 0)
6306 goto out;
6307 ret = process_all_new_xattrs(sctx);
6308 if (ret < 0)
6309 goto out;
6310 } else {
6311 sctx->cur_inode_gen = left_gen;
6312 sctx->cur_inode_new = 0;
6313 sctx->cur_inode_new_gen = 0;
6314 sctx->cur_inode_deleted = 0;
6315 sctx->cur_inode_size = btrfs_inode_size(
6316 sctx->left_path->nodes[0], left_ii);
6317 sctx->cur_inode_mode = btrfs_inode_mode(
6318 sctx->left_path->nodes[0], left_ii);
6322 out:
6323 return ret;
6326 /*
6327 * We have to process new refs before deleted refs, but compare_trees gives us
6328 * the new and deleted refs mixed. To fix this, we record the new/deleted refs
6329 * first and later process them in process_recorded_refs.
6330 * For the cur_inode_new_gen case, we skip recording completely because
6331 * changed_inode already initiated processing of refs. The reason for this is
6332 * that in this case, compare_tree actually compares the refs of 2 different
6333 * inodes. To fix this, process_all_refs is used in changed_inode to handle all
6334 * refs of the right tree as deleted and all refs of the left tree as new.
6335 */
6336 static int changed_ref(struct send_ctx *sctx,
6337 enum btrfs_compare_tree_result result)
6339 int ret = 0;
6341 if (sctx->cur_ino != sctx->cmp_key->objectid) {
6342 inconsistent_snapshot_error(sctx, result, "reference");
6343 return -EIO;
6346 if (!sctx->cur_inode_new_gen &&
6347 sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) {
6348 if (result == BTRFS_COMPARE_TREE_NEW)
6349 ret = record_new_ref(sctx);
6350 else if (result == BTRFS_COMPARE_TREE_DELETED)
6351 ret = record_deleted_ref(sctx);
6352 else if (result == BTRFS_COMPARE_TREE_CHANGED)
6353 ret = record_changed_ref(sctx);
6356 return ret;
6359 /*
6360 * Process new/deleted/changed xattrs. We skip processing in the
6361 * cur_inode_new_gen case because changed_inode already initiated processing
6362 * of xattrs. The reason is the same as in changed_ref.
6363 */
6364 static int changed_xattr(struct send_ctx *sctx,
6365 enum btrfs_compare_tree_result result)
6367 int ret = 0;
6369 if (sctx->cur_ino != sctx->cmp_key->objectid) {
6370 inconsistent_snapshot_error(sctx, result, "xattr");
6371 return -EIO;
6374 if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
6375 if (result == BTRFS_COMPARE_TREE_NEW)
6376 ret = process_new_xattr(sctx);
6377 else if (result == BTRFS_COMPARE_TREE_DELETED)
6378 ret = process_deleted_xattr(sctx);
6379 else if (result == BTRFS_COMPARE_TREE_CHANGED)
6380 ret = process_changed_xattr(sctx);
6383 return ret;
6386 /*
6387 * Process new/deleted/changed extents. We skip processing in the
6388 * cur_inode_new_gen case because changed_inode already initiated processing
6389 * of extents. The reason is the same as in changed_ref.
6390 */
6391 static int changed_extent(struct send_ctx *sctx,
6392 enum btrfs_compare_tree_result result)
6394 int ret = 0;
6396 /*
6397 * We have found an extent item that changed without the inode item
6398 * having changed. This can happen either after relocation (where the
6399 * disk_bytenr of an extent item is replaced at
6400 * relocation.c:replace_file_extents()) or after deduplication into a
6401 * file in both the parent and send snapshots (where an extent item can
6402 * get modified or replaced with a new one). Note that deduplication
6403 * updates the inode item, but it only changes the iversion (sequence
6404 * field in the inode item) of the inode, so if a file is deduplicated
6405 * the same number of times in both the parent and send snapshots, its
6406 * iversion becomes the same in both snapshots, whence the inode item is
6407 * the same on both snapshots.
6408 */
6409 if (sctx->cur_ino != sctx->cmp_key->objectid)
6410 return 0;
6412 if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
6413 if (result != BTRFS_COMPARE_TREE_DELETED)
6414 ret = process_extent(sctx, sctx->left_path,
6415 sctx->cmp_key);
6418 return ret;
6421 static int dir_changed(struct send_ctx *sctx, u64 dir)
6423 u64 orig_gen, new_gen;
6424 int ret;
6426 ret = get_inode_info(sctx->send_root, dir, NULL, &new_gen, NULL, NULL,
6427 NULL, NULL);
6428 if (ret)
6429 return ret;
6431 ret = get_inode_info(sctx->parent_root, dir, NULL, &orig_gen, NULL,
6432 NULL, NULL, NULL);
6433 if (ret)
6434 return ret;
6436 return (orig_gen != new_gen) ? 1 : 0;
6439 static int compare_refs(struct send_ctx *sctx, struct btrfs_path *path,
6440 struct btrfs_key *key)
6442 struct btrfs_inode_extref *extref;
6443 struct extent_buffer *leaf;
6444 u64 dirid = 0, last_dirid = 0;
6445 unsigned long ptr;
6446 u32 item_size;
6447 u32 cur_offset = 0;
6448 int ref_name_len;
6449 int ret = 0;
6451 /* Easy case, just check this one dirid */
6452 if (key->type == BTRFS_INODE_REF_KEY) {
6453 dirid = key->offset;
6455 ret = dir_changed(sctx, dirid);
6456 goto out;
6459 leaf = path->nodes[0];
6460 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
6461 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
6462 while (cur_offset < item_size) {
6463 extref = (struct btrfs_inode_extref *)(ptr +
6464 cur_offset);
6465 dirid = btrfs_inode_extref_parent(leaf, extref);
6466 ref_name_len = btrfs_inode_extref_name_len(leaf, extref);
6467 cur_offset += ref_name_len + sizeof(*extref);
6468 if (dirid == last_dirid)
6469 continue;
6470 ret = dir_changed(sctx, dirid);
6471 if (ret)
6472 break;
6473 last_dirid = dirid;
6475 out:
6476 return ret;
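/*
 * The extref walk above steps through a packed array: each element is the
 * fixed btrfs_inode_extref header immediately followed by name_len name
 * bytes. A userspace sketch of the same stride, assuming that on-disk
 * layout with little-endian fields:
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *
 *	struct extref {
 *		uint64_t parent_objectid;
 *		uint64_t index;
 *		uint16_t name_len;
 *	} __attribute__((packed));
 *
 *	static void walk(const unsigned char *item, uint32_t item_size)
 *	{
 *		uint32_t off = 0;
 *
 *		while (off + sizeof(struct extref) <= item_size) {
 *			struct extref e;
 *
 *			memcpy(&e, item + off, sizeof(e));
 *			printf("parent dir %llu, name %.*s\n",
 *			       (unsigned long long)e.parent_objectid,
 *			       e.name_len,
 *			       (const char *)(item + off + sizeof(e)));
 *			off += sizeof(e) + e.name_len;
 *		}
 *	}
 */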
6479 /*
6480 * Updates compare-related fields in sctx and simply forwards to the actual
6481 * changed_xxx functions.
6482 */
6483 static int changed_cb(struct btrfs_path *left_path,
6484 struct btrfs_path *right_path,
6485 struct btrfs_key *key,
6486 enum btrfs_compare_tree_result result,
6487 void *ctx)
6489 int ret = 0;
6490 struct send_ctx *sctx = ctx;
6492 if (result == BTRFS_COMPARE_TREE_SAME) {
6493 if (key->type == BTRFS_INODE_REF_KEY ||
6494 key->type == BTRFS_INODE_EXTREF_KEY) {
6495 ret = compare_refs(sctx, left_path, key);
6496 if (!ret)
6497 return 0;
6498 if (ret < 0)
6499 return ret;
6500 } else if (key->type == BTRFS_EXTENT_DATA_KEY) {
6501 return maybe_send_hole(sctx, left_path, key);
6502 } else {
6503 return 0;
6505 result = BTRFS_COMPARE_TREE_CHANGED;
6506 ret = 0;
6509 sctx->left_path = left_path;
6510 sctx->right_path = right_path;
6511 sctx->cmp_key = key;
6513 ret = finish_inode_if_needed(sctx, 0);
6514 if (ret < 0)
6515 goto out;
6517 /* Ignore non-FS objects */
6518 if (key->objectid == BTRFS_FREE_INO_OBJECTID ||
6519 key->objectid == BTRFS_FREE_SPACE_OBJECTID)
6520 goto out;
6522 if (key->type == BTRFS_INODE_ITEM_KEY) {
6523 ret = changed_inode(sctx, result);
6524 } else if (!sctx->ignore_cur_inode) {
6525 if (key->type == BTRFS_INODE_REF_KEY ||
6526 key->type == BTRFS_INODE_EXTREF_KEY)
6527 ret = changed_ref(sctx, result);
6528 else if (key->type == BTRFS_XATTR_ITEM_KEY)
6529 ret = changed_xattr(sctx, result);
6530 else if (key->type == BTRFS_EXTENT_DATA_KEY)
6531 ret = changed_extent(sctx, result);
6534 out:
6535 return ret;
6538 static int full_send_tree(struct send_ctx *sctx)
6540 int ret;
6541 struct btrfs_root *send_root = sctx->send_root;
6542 struct btrfs_key key;
6543 struct btrfs_path *path;
6544 struct extent_buffer *eb;
6545 int slot;
6547 path = alloc_path_for_send();
6548 if (!path)
6549 return -ENOMEM;
6551 key.objectid = BTRFS_FIRST_FREE_OBJECTID;
6552 key.type = BTRFS_INODE_ITEM_KEY;
6553 key.offset = 0;
6555 ret = btrfs_search_slot_for_read(send_root, &key, path, 1, 0);
6556 if (ret < 0)
6557 goto out;
6558 if (ret)
6559 goto out_finish;
6561 while (1) {
6562 eb = path->nodes[0];
6563 slot = path->slots[0];
6564 btrfs_item_key_to_cpu(eb, &key, slot);
6566 ret = changed_cb(path, NULL, &key,
6567 BTRFS_COMPARE_TREE_NEW, sctx);
6568 if (ret < 0)
6569 goto out;
6571 ret = btrfs_next_item(send_root, path);
6572 if (ret < 0)
6573 goto out;
6574 if (ret) {
6575 ret = 0;
6576 break;
6580 out_finish:
6581 ret = finish_inode_if_needed(sctx, 1);
6583 out:
6584 btrfs_free_path(path);
6585 return ret;
6588 static int tree_move_down(struct btrfs_path *path, int *level)
6590 struct extent_buffer *eb;
6592 BUG_ON(*level == 0);
6593 eb = btrfs_read_node_slot(path->nodes[*level], path->slots[*level]);
6594 if (IS_ERR(eb))
6595 return PTR_ERR(eb);
6597 path->nodes[*level - 1] = eb;
6598 path->slots[*level - 1] = 0;
6599 (*level)--;
6600 return 0;
6603 static int tree_move_next_or_upnext(struct btrfs_path *path,
6604 int *level, int root_level)
6606 int ret = 0;
6607 int nritems;
6608 nritems = btrfs_header_nritems(path->nodes[*level]);
6610 path->slots[*level]++;
6612 while (path->slots[*level] >= nritems) {
6613 if (*level == root_level)
6614 return -1;
6616 /* move upnext */
6617 path->slots[*level] = 0;
6618 free_extent_buffer(path->nodes[*level]);
6619 path->nodes[*level] = NULL;
6620 (*level)++;
6621 path->slots[*level]++;
6623 nritems = btrfs_header_nritems(path->nodes[*level]);
6624 ret = 1;
6626 return ret;
6629 /*
6630 * Returns 1 if it had to move up and next. 0 is returned if it moved only next
6631 * or down.
6632 */
6633 static int tree_advance(struct btrfs_path *path,
6634 int *level, int root_level,
6635 int allow_down,
6636 struct btrfs_key *key)
6638 int ret;
6640 if (*level == 0 || !allow_down) {
6641 ret = tree_move_next_or_upnext(path, level, root_level);
6642 } else {
6643 ret = tree_move_down(path, level);
6645 if (ret >= 0) {
6646 if (*level == 0)
6647 btrfs_item_key_to_cpu(path->nodes[*level], key,
6648 path->slots[*level]);
6649 else
6650 btrfs_node_key_to_cpu(path->nodes[*level], key,
6651 path->slots[*level]);
6653 return ret;
6656 static int tree_compare_item(struct btrfs_path *left_path,
6657 struct btrfs_path *right_path,
6658 char *tmp_buf)
6660 int cmp;
6661 int len1, len2;
6662 unsigned long off1, off2;
6664 len1 = btrfs_item_size_nr(left_path->nodes[0], left_path->slots[0]);
6665 len2 = btrfs_item_size_nr(right_path->nodes[0], right_path->slots[0]);
6666 if (len1 != len2)
6667 return 1;
6669 off1 = btrfs_item_ptr_offset(left_path->nodes[0], left_path->slots[0]);
6670 off2 = btrfs_item_ptr_offset(right_path->nodes[0],
6671 right_path->slots[0]);
6673 read_extent_buffer(left_path->nodes[0], tmp_buf, off1, len1);
6675 cmp = memcmp_extent_buffer(right_path->nodes[0], tmp_buf, off2, len1);
6676 if (cmp)
6677 return 1;
6678 return 0;
6681 /*
6682 * This function compares two trees and calls the provided callback for
6683 * every changed/new/deleted item it finds.
6684 * If shared tree blocks are encountered, whole subtrees are skipped, making
6685 * the compare pretty fast on snapshotted subvolumes.
6686 *
6687 * This currently works on commit roots only. As commit roots are read only,
6688 * we don't do any locking. The commit roots are protected with transactions.
6689 * Transactions are ended and rejoined when a commit is tried in between.
6690 *
6691 * This function checks for modifications done to the trees while comparing.
6692 * If it detects a change, it aborts immediately.
6693 */
6694 static int btrfs_compare_trees(struct btrfs_root *left_root,
6695 struct btrfs_root *right_root,
6696 btrfs_changed_cb_t changed_cb, void *ctx)
6697 {
6698 struct btrfs_fs_info *fs_info = left_root->fs_info;
6699 int ret;
6700 int cmp;
6701 struct btrfs_path *left_path = NULL;
6702 struct btrfs_path *right_path = NULL;
6703 struct btrfs_key left_key;
6704 struct btrfs_key right_key;
6705 char *tmp_buf = NULL;
6706 int left_root_level;
6707 int right_root_level;
6708 int left_level;
6709 int right_level;
6710 int left_end_reached;
6711 int right_end_reached;
6712 int advance_left;
6713 int advance_right;
6714 u64 left_blockptr;
6715 u64 right_blockptr;
6716 u64 left_gen;
6717 u64 right_gen;
6719 left_path = btrfs_alloc_path();
6720 if (!left_path) {
6721 ret = -ENOMEM;
6722 goto out;
6723 }
6724 right_path = btrfs_alloc_path();
6725 if (!right_path) {
6726 ret = -ENOMEM;
6727 goto out;
6728 }
6730 tmp_buf = kvmalloc(fs_info->nodesize, GFP_KERNEL);
6731 if (!tmp_buf) {
6732 ret = -ENOMEM;
6733 goto out;
6734 }
6736 left_path->search_commit_root = 1;
6737 left_path->skip_locking = 1;
6738 right_path->search_commit_root = 1;
6739 right_path->skip_locking = 1;
6741 /*
6742 * Strategy: Go to the first items of both trees. Then do
6744 * If both trees are at level 0
6745 * Compare keys of current items
6746 * If left < right treat left item as new, advance left tree
6747 * and repeat
6748 * If left > right treat right item as deleted, advance right tree
6749 * and repeat
6750 * If left == right do deep compare of items, treat as changed if
6751 * needed, advance both trees and repeat
6752 * If both trees are at the same level but not at level 0
6753 * Compare keys of current nodes/leafs
6754 * If left < right advance left tree and repeat
6755 * If left > right advance right tree and repeat
6756 * If left == right compare blockptrs of the next nodes/leafs
6757 * If they match advance both trees but stay at the same level
6758 * and repeat
6759 * If they don't match advance both trees while allowing to go
6760 * deeper and repeat
6761 * If tree levels are different
6762 * Advance the tree that needs it and repeat
6764 * Advancing a tree means:
6765 * If we are at level 0, try to go to the next slot. If that's not
6766 * possible, go one level up and repeat. Stop when we found a level
6767 * where we could go to the next slot. We may at this point be on a
6768 * node or a leaf.
6770 * If we are not at level 0 and not on shared tree blocks, go one
6771 * level deeper.
6773 * If we are not at level 0 and on shared tree blocks, go one slot to
6774 * the right if possible or go up and right.
6775 */
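/*
 * Illustrative example (leaf-only trees, keys abbreviated): if the parent
 * snapshot (right) holds keys {A, C, D} and the send snapshot (left) holds
 * {A, B, D}, the walk reports B as BTRFS_COMPARE_TREE_NEW, C as
 * BTRFS_COMPARE_TREE_DELETED, and byte-compares A and D to report each as
 * either SAME or CHANGED.
 */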
6777 down_read(&fs_info->commit_root_sem);
6778 left_level = btrfs_header_level(left_root->commit_root);
6779 left_root_level = left_level;
6780 left_path->nodes[left_level] =
6781 btrfs_clone_extent_buffer(left_root->commit_root);
6782 if (!left_path->nodes[left_level]) {
6783 up_read(&fs_info->commit_root_sem);
6784 ret = -ENOMEM;
6785 goto out;
6786 }
6788 right_level = btrfs_header_level(right_root->commit_root);
6789 right_root_level = right_level;
6790 right_path->nodes[right_level] =
6791 btrfs_clone_extent_buffer(right_root->commit_root);
6792 if (!right_path->nodes[right_level]) {
6793 up_read(&fs_info->commit_root_sem);
6794 ret = -ENOMEM;
6795 goto out;
6796 }
6797 up_read(&fs_info->commit_root_sem);
6799 if (left_level == 0)
6800 btrfs_item_key_to_cpu(left_path->nodes[left_level],
6801 &left_key, left_path->slots[left_level]);
6802 else
6803 btrfs_node_key_to_cpu(left_path->nodes[left_level],
6804 &left_key, left_path->slots[left_level]);
6805 if (right_level == 0)
6806 btrfs_item_key_to_cpu(right_path->nodes[right_level],
6807 &right_key, right_path->slots[right_level]);
6808 else
6809 btrfs_node_key_to_cpu(right_path->nodes[right_level],
6810 &right_key, right_path->slots[right_level]);
6812 left_end_reached = right_end_reached = 0;
6813 advance_left = advance_right = 0;
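/*
 * Main driver loop: each iteration first carries out any advance requested
 * by the previous one (possibly hitting the end of a tree), then classifies
 * the current pair of positions and requests the next advance.
 */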
6815 while (1) {
6816 cond_resched();
6817 if (advance_left && !left_end_reached) {
6818 ret = tree_advance(left_path, &left_level,
6819 left_root_level,
6820 advance_left != ADVANCE_ONLY_NEXT,
6821 &left_key);
6822 if (ret == -1)
6823 left_end_reached = ADVANCE;
6824 else if (ret < 0)
6825 goto out;
6826 advance_left = 0;
6827 }
6828 if (advance_right && !right_end_reached) {
6829 ret = tree_advance(right_path, &right_level,
6830 right_root_level,
6831 advance_right != ADVANCE_ONLY_NEXT,
6832 &right_key);
6833 if (ret == -1)
6834 right_end_reached = ADVANCE;
6835 else if (ret < 0)
6836 goto out;
6837 advance_right = 0;
6838 }
6840 if (left_end_reached && right_end_reached) {
6841 ret = 0;
6842 goto out;
6843 } else if (left_end_reached) {
6844 if (right_level == 0) {
6845 ret = changed_cb(left_path, right_path,
6846 &right_key,
6847 BTRFS_COMPARE_TREE_DELETED,
6848 ctx);
6849 if (ret < 0)
6850 goto out;
6851 }
6852 advance_right = ADVANCE;
6853 continue;
6854 } else if (right_end_reached) {
6855 if (left_level == 0) {
6856 ret = changed_cb(left_path, right_path,
6857 &left_key,
6858 BTRFS_COMPARE_TREE_NEW,
6859 ctx);
6860 if (ret < 0)
6861 goto out;
6862 }
6863 advance_left = ADVANCE;
6864 continue;
6865 }
6867 if (left_level == 0 && right_level == 0) {
6868 cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
6869 if (cmp < 0) {
6870 ret = changed_cb(left_path, right_path,
6871 &left_key,
6872 BTRFS_COMPARE_TREE_NEW,
6873 ctx);
6874 if (ret < 0)
6875 goto out;
6876 advance_left = ADVANCE;
6877 } else if (cmp > 0) {
6878 ret = changed_cb(left_path, right_path,
6879 &right_key,
6880 BTRFS_COMPARE_TREE_DELETED,
6881 ctx);
6882 if (ret < 0)
6883 goto out;
6884 advance_right = ADVANCE;
6885 } else {
6886 enum btrfs_compare_tree_result result;
6888 WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
6889 ret = tree_compare_item(left_path, right_path,
6890 tmp_buf);
6891 if (ret)
6892 result = BTRFS_COMPARE_TREE_CHANGED;
6893 else
6894 result = BTRFS_COMPARE_TREE_SAME;
6895 ret = changed_cb(left_path, right_path,
6896 &left_key, result, ctx);
6897 if (ret < 0)
6898 goto out;
6899 advance_left = ADVANCE;
6900 advance_right = ADVANCE;
6901 }
6902 } else if (left_level == right_level) {
6903 cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
6904 if (cmp < 0) {
6905 advance_left = ADVANCE;
6906 } else if (cmp > 0) {
6907 advance_right = ADVANCE;
6908 } else {
6909 left_blockptr = btrfs_node_blockptr(
6910 left_path->nodes[left_level],
6911 left_path->slots[left_level]);
6912 right_blockptr = btrfs_node_blockptr(
6913 right_path->nodes[right_level],
6914 right_path->slots[right_level]);
6915 left_gen = btrfs_node_ptr_generation(
6916 left_path->nodes[left_level],
6917 left_path->slots[left_level]);
6918 right_gen = btrfs_node_ptr_generation(
6919 right_path->nodes[right_level],
6920 right_path->slots[right_level]);
6921 if (left_blockptr == right_blockptr &&
6922 left_gen == right_gen) {
6923 /*
6924 * As we're on a shared block, don't
6925 * allow to go deeper.
6926 */
6927 advance_left = ADVANCE_ONLY_NEXT;
6928 advance_right = ADVANCE_ONLY_NEXT;
6929 } else {
6930 advance_left = ADVANCE;
6931 advance_right = ADVANCE;
6932 }
6933 }
6934 } else if (left_level < right_level) {
6935 advance_right = ADVANCE;
6936 } else {
6937 advance_left = ADVANCE;
6938 }
6939 }
6941 out:
6942 btrfs_free_path(left_path);
6943 btrfs_free_path(right_path);
6944 kvfree(tmp_buf);
6945 return ret;
6946 }
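/*
 * Top level of a send operation: emit the stream header and the subvolume
 * begin command, then either diff against the parent root (incremental send)
 * or walk the whole tree and report every item as new (full send).
 */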
6948 static int send_subvol(struct send_ctx *sctx)
6949 {
6950 int ret;
6952 if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_STREAM_HEADER)) {
6953 ret = send_header(sctx);
6954 if (ret < 0)
6955 goto out;
6956 }
6958 ret = send_subvol_begin(sctx);
6959 if (ret < 0)
6960 goto out;
6962 if (sctx->parent_root) {
6963 ret = btrfs_compare_trees(sctx->send_root, sctx->parent_root,
6964 changed_cb, sctx);
6965 if (ret < 0)
6966 goto out;
6967 ret = finish_inode_if_needed(sctx, 1);
6968 if (ret < 0)
6969 goto out;
6970 } else {
6971 ret = full_send_tree(sctx);
6972 if (ret < 0)
6973 goto out;
6974 }
6976 out:
6977 free_recorded_refs(sctx);
6978 return ret;
6979 }
6981 /*
6982 * If orphan cleanup did remove any orphans from a root, it means the tree
6983 * was modified and therefore the commit root is not the same as the current
6984 * root anymore. This is a problem, because send uses the commit root and
6985 * therefore can see inode items that don't exist in the current root anymore,
6986 * and for example make calls to btrfs_iget, which will do tree lookups based
6987 * on the current root and not on the commit root. Those lookups will fail,
6988 * returning a -ESTALE error, and making send fail with that error. So make
6989 * sure a send does not see any orphans we have just removed, and that it will
6990 * see the same inodes regardless of whether a transaction commit happened
6991 * before it started (meaning that the commit root will be the same as the
6992 * current root) or not.
6993 */
6994 static int ensure_commit_roots_uptodate(struct send_ctx *sctx)
6995 {
6996 int i;
6997 struct btrfs_trans_handle *trans = NULL;
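/*
 * Two passes: the first runs without a transaction and jumps to commit_trans
 * if any root is stale. There a transaction is joined and the check repeats;
 * if the roots became current in the meantime the transaction is simply
 * ended, otherwise it is committed to refresh all commit roots.
 */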
6999 again:
7000 if (sctx->parent_root &&
7001 sctx->parent_root->node != sctx->parent_root->commit_root)
7002 goto commit_trans;
7004 for (i = 0; i < sctx->clone_roots_cnt; i++)
7005 if (sctx->clone_roots[i].root->node !=
7006 sctx->clone_roots[i].root->commit_root)
7007 goto commit_trans;
7009 if (trans)
7010 return btrfs_end_transaction(trans);
7012 return 0;
7014 commit_trans:
7015 /* Use any root, all fs roots will get their commit roots updated. */
7016 if (!trans) {
7017 trans = btrfs_join_transaction(sctx->send_root);
7018 if (IS_ERR(trans))
7019 return PTR_ERR(trans);
7020 goto again;
7021 }
7023 return btrfs_commit_transaction(trans);
7024 }
7026 /*
7027 * Make sure any existing delalloc is flushed for any root used by a send
7028 * operation so that we do not miss any data and we do not race with writeback
7029 * finishing and changing a tree while send is using the tree. This could
7030 * happen if a subvolume is in RW mode, has delalloc, is turned to RO mode and
7031 * a send operation then uses the subvolume.
7032 * After flushing delalloc, ensure_commit_roots_uptodate() must be called.
7033 */
7034 static int flush_delalloc_roots(struct send_ctx *sctx)
7035 {
7036 struct btrfs_root *root = sctx->parent_root;
7037 int ret;
7038 int i;
7040 if (root) {
7041 ret = btrfs_start_delalloc_snapshot(root);
7042 if (ret)
7043 return ret;
7044 btrfs_wait_ordered_extents(root, U64_MAX, 0, U64_MAX);
7045 }
7047 for (i = 0; i < sctx->clone_roots_cnt; i++) {
7048 root = sctx->clone_roots[i].root;
7049 ret = btrfs_start_delalloc_snapshot(root);
7050 if (ret)
7051 return ret;
7052 btrfs_wait_ordered_extents(root, U64_MAX, 0, U64_MAX);
7053 }
7055 return 0;
7056 }
7058 static void btrfs_root_dec_send_in_progress(struct btrfs_root *root)
7059 {
7060 spin_lock(&root->root_item_lock);
7061 root->send_in_progress--;
7062 /*
7063 * Not much left to do; we don't know why it's unbalanced and
7064 * can't blindly reset it to 0.
7065 */
7066 if (root->send_in_progress < 0)
7067 btrfs_err(root->fs_info,
7068 "send_in_progress unbalanced %d root %llu",
7069 root->send_in_progress, root->root_key.objectid);
7070 spin_unlock(&root->root_item_lock);
7071 }
7073 static void dedupe_in_progress_warn(const struct btrfs_root *root)
7074 {
7075 btrfs_warn_rl(root->fs_info,
7076 "cannot use root %llu for send while deduplications on it are in progress (%d in progress)",
7077 root->root_key.objectid, root->dedupe_in_progress);
7078 }
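/*
 * Entry point of the send ioctl. A rough userspace sketch (normally issued
 * by "btrfs send" from btrfs-progs; the fd variable names below are made up):
 *
 *	struct btrfs_ioctl_send_args args = {
 *		.send_fd = stream_fd,		// the stream is written here
 *		.parent_root = parent_id,	// 0 for a full send
 *	};
 *	ret = ioctl(subvol_fd, BTRFS_IOC_SEND, &args);
 *
 * The target subvolume must be read-only for the whole duration of the send.
 */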
7080 long btrfs_ioctl_send(struct file *mnt_file, struct btrfs_ioctl_send_args *arg)
7081 {
7082 int ret = 0;
7083 struct btrfs_root *send_root = BTRFS_I(file_inode(mnt_file))->root;
7084 struct btrfs_fs_info *fs_info = send_root->fs_info;
7085 struct btrfs_root *clone_root;
7086 struct send_ctx *sctx = NULL;
7087 u32 i;
7088 u64 *clone_sources_tmp = NULL;
7089 int clone_sources_to_rollback = 0;
7090 unsigned alloc_size;
7091 int sort_clone_roots = 0;
7093 if (!capable(CAP_SYS_ADMIN))
7094 return -EPERM;
7096 /*
7097 * The subvolume must remain read-only during send; protect against
7098 * making it RW. This also protects against deletion.
7099 */
7100 spin_lock(&send_root->root_item_lock);
7101 if (btrfs_root_readonly(send_root) && send_root->dedupe_in_progress) {
7102 dedupe_in_progress_warn(send_root);
7103 spin_unlock(&send_root->root_item_lock);
7104 return -EAGAIN;
7105 }
7106 send_root->send_in_progress++;
7107 spin_unlock(&send_root->root_item_lock);
7109 /*
7110 * Userspace tools do the checks and warn the user if it's
7111 * not RO.
7112 */
7113 if (!btrfs_root_readonly(send_root)) {
7114 ret = -EPERM;
7115 goto out;
7116 }
7118 /*
7119 * Check that we don't overflow at later allocations: we request
7120 * clone_sources_count + 1 items, and compare to unsigned long inside
7121 * access_ok.
7122 */
7123 if (arg->clone_sources_count >
7124 ULONG_MAX / sizeof(struct clone_root) - 1) {
7125 ret = -EINVAL;
7126 goto out;
7127 }
7129 if (arg->flags & ~BTRFS_SEND_FLAG_MASK) {
7130 ret = -EINVAL;
7131 goto out;
7132 }
7134 sctx = kzalloc(sizeof(struct send_ctx), GFP_KERNEL);
7135 if (!sctx) {
7136 ret = -ENOMEM;
7137 goto out;
7138 }
7140 INIT_LIST_HEAD(&sctx->new_refs);
7141 INIT_LIST_HEAD(&sctx->deleted_refs);
7142 INIT_RADIX_TREE(&sctx->name_cache, GFP_KERNEL);
7143 INIT_LIST_HEAD(&sctx->name_cache_list);
7145 sctx->flags = arg->flags;
7147 sctx->send_filp = fget(arg->send_fd);
7148 if (!sctx->send_filp) {
7149 ret = -EBADF;
7150 goto out;
7151 }
7153 sctx->send_root = send_root;
7154 /*
7155 * Unlikely but possible: if the subvolume is marked for deletion but
7156 * removal of its directory entry is slow, send can still be started.
7157 */
7158 if (btrfs_root_dead(sctx->send_root)) {
7159 ret = -EPERM;
7160 goto out;
7161 }
7163 sctx->clone_roots_cnt = arg->clone_sources_count;
7165 sctx->send_max_size = BTRFS_SEND_BUF_SIZE;
7166 sctx->send_buf = kvmalloc(sctx->send_max_size, GFP_KERNEL);
7167 if (!sctx->send_buf) {
7168 ret = -ENOMEM;
7169 goto out;
7170 }
7172 sctx->read_buf = kvmalloc(BTRFS_SEND_READ_SIZE, GFP_KERNEL);
7173 if (!sctx->read_buf) {
7174 ret = -ENOMEM;
7175 goto out;
7176 }
7178 sctx->pending_dir_moves = RB_ROOT;
7179 sctx->waiting_dir_moves = RB_ROOT;
7180 sctx->orphan_dirs = RB_ROOT;
7182 alloc_size = sizeof(struct clone_root) * (arg->clone_sources_count + 1);
7184 sctx->clone_roots = kzalloc(alloc_size, GFP_KERNEL);
7185 if (!sctx->clone_roots) {
7186 ret = -ENOMEM;
7187 goto out;
7188 }
7190 alloc_size = arg->clone_sources_count * sizeof(*arg->clone_sources);
7192 if (arg->clone_sources_count) {
7193 clone_sources_tmp = kvmalloc(alloc_size, GFP_KERNEL);
7194 if (!clone_sources_tmp) {
7195 ret = -ENOMEM;
7196 goto out;
7197 }
7199 ret = copy_from_user(clone_sources_tmp, arg->clone_sources,
7200 alloc_size);
7201 if (ret) {
7202 ret = -EFAULT;
7203 goto out;
7204 }
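/*
 * Validate each clone source: it must be a live, read-only root with no
 * deduplication in progress, and it is pinned via send_in_progress for the
 * duration of the send.
 */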
7206 for (i = 0; i < arg->clone_sources_count; i++) {
7207 clone_root = btrfs_get_fs_root(fs_info,
7208 clone_sources_tmp[i], true);
7209 if (IS_ERR(clone_root)) {
7210 ret = PTR_ERR(clone_root);
7211 goto out;
7212 }
7213 spin_lock(&clone_root->root_item_lock);
7214 if (!btrfs_root_readonly(clone_root) ||
7215 btrfs_root_dead(clone_root)) {
7216 spin_unlock(&clone_root->root_item_lock);
7217 btrfs_put_root(clone_root);
7218 ret = -EPERM;
7219 goto out;
7220 }
7221 if (clone_root->dedupe_in_progress) {
7222 dedupe_in_progress_warn(clone_root);
7223 spin_unlock(&clone_root->root_item_lock);
7224 btrfs_put_root(clone_root);
7225 ret = -EAGAIN;
7226 goto out;
7227 }
7228 clone_root->send_in_progress++;
7229 spin_unlock(&clone_root->root_item_lock);
7231 sctx->clone_roots[i].root = clone_root;
7232 clone_sources_to_rollback = i + 1;
7233 }
7234 kvfree(clone_sources_tmp);
7235 clone_sources_tmp = NULL;
7236 }
7238 if (arg->parent_root) {
7239 sctx->parent_root = btrfs_get_fs_root(fs_info, arg->parent_root,
7240 true);
7241 if (IS_ERR(sctx->parent_root)) {
7242 ret = PTR_ERR(sctx->parent_root);
7243 goto out;
7244 }
7246 spin_lock(&sctx->parent_root->root_item_lock);
7247 sctx->parent_root->send_in_progress++;
7248 if (!btrfs_root_readonly(sctx->parent_root) ||
7249 btrfs_root_dead(sctx->parent_root)) {
7250 spin_unlock(&sctx->parent_root->root_item_lock);
7251 ret = -EPERM;
7252 goto out;
7253 }
7254 if (sctx->parent_root->dedupe_in_progress) {
7255 dedupe_in_progress_warn(sctx->parent_root);
7256 spin_unlock(&sctx->parent_root->root_item_lock);
7257 ret = -EAGAIN;
7258 goto out;
7259 }
7260 spin_unlock(&sctx->parent_root->root_item_lock);
7261 }
7263 /*
7264 * Clones from send_root are allowed, but only if the clone source
7265 * is behind the current send position. This is checked while searching
7266 * for possible clone sources.
7267 */
7268 sctx->clone_roots[sctx->clone_roots_cnt++].root =
7269 btrfs_grab_root(sctx->send_root);
7271 /* We do a bsearch later */
7272 sort(sctx->clone_roots, sctx->clone_roots_cnt,
7273 sizeof(*sctx->clone_roots), __clone_root_cmp_sort,
7274 NULL);
7275 sort_clone_roots = 1;
7277 ret = flush_delalloc_roots(sctx);
7278 if (ret)
7279 goto out;
7281 ret = ensure_commit_roots_uptodate(sctx);
7282 if (ret)
7283 goto out;
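/*
 * Send and balance are mutually exclusive: bail out with -EAGAIN if a
 * balance is running, otherwise raise fs_info->send_in_progress so that a
 * balance attempted in the meantime backs off in the same way.
 */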
7285 mutex_lock(&fs_info->balance_mutex);
7286 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
7287 mutex_unlock(&fs_info->balance_mutex);
7288 btrfs_warn_rl(fs_info,
7289 "cannot run send because a balance operation is in progress");
7290 ret = -EAGAIN;
7291 goto out;
7292 }
7293 fs_info->send_in_progress++;
7294 mutex_unlock(&fs_info->balance_mutex);
7296 current->journal_info = BTRFS_SEND_TRANS_STUB;
7297 ret = send_subvol(sctx);
7298 current->journal_info = NULL;
7299 mutex_lock(&fs_info->balance_mutex);
7300 fs_info->send_in_progress--;
7301 mutex_unlock(&fs_info->balance_mutex);
7302 if (ret < 0)
7303 goto out;
7305 if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_END_CMD)) {
7306 ret = begin_cmd(sctx, BTRFS_SEND_C_END);
7307 if (ret < 0)
7308 goto out;
7309 ret = send_cmd(sctx);
7310 if (ret < 0)
7311 goto out;
7312 }
7314 out:
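/*
 * The cleanup below must cope with a partially constructed context: sctx may
 * be NULL and each resource is released only if it was set up, hence the
 * "sctx &&" checks. On success the move/orphan trees must already be empty;
 * the WARN_ONs catch leaks.
 */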
7315 WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->pending_dir_moves));
7316 while (sctx && !RB_EMPTY_ROOT(&sctx->pending_dir_moves)) {
7317 struct rb_node *n;
7318 struct pending_dir_move *pm;
7320 n = rb_first(&sctx->pending_dir_moves);
7321 pm = rb_entry(n, struct pending_dir_move, node);
7322 while (!list_empty(&pm->list)) {
7323 struct pending_dir_move *pm2;
7325 pm2 = list_first_entry(&pm->list,
7326 struct pending_dir_move, list);
7327 free_pending_move(sctx, pm2);
7328 }
7329 free_pending_move(sctx, pm);
7330 }
7332 WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves));
7333 while (sctx && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves)) {
7334 struct rb_node *n;
7335 struct waiting_dir_move *dm;
7337 n = rb_first(&sctx->waiting_dir_moves);
7338 dm = rb_entry(n, struct waiting_dir_move, node);
7339 rb_erase(&dm->node, &sctx->waiting_dir_moves);
7340 kfree(dm);
7341 }
7343 WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->orphan_dirs));
7344 while (sctx && !RB_EMPTY_ROOT(&sctx->orphan_dirs)) {
7345 struct rb_node *n;
7346 struct orphan_dir_info *odi;
7348 n = rb_first(&sctx->orphan_dirs);
7349 odi = rb_entry(n, struct orphan_dir_info, node);
7350 free_orphan_dir_info(sctx, odi);
7351 }
7353 if (sort_clone_roots) {
7354 for (i = 0; i < sctx->clone_roots_cnt; i++) {
7355 btrfs_root_dec_send_in_progress(
7356 sctx->clone_roots[i].root);
7357 btrfs_put_root(sctx->clone_roots[i].root);
7358 }
7359 } else {
7360 for (i = 0; sctx && i < clone_sources_to_rollback; i++) {
7361 btrfs_root_dec_send_in_progress(
7362 sctx->clone_roots[i].root);
7363 btrfs_put_root(sctx->clone_roots[i].root);
7364 }
7366 btrfs_root_dec_send_in_progress(send_root);
7367 }
7368 if (sctx && !IS_ERR_OR_NULL(sctx->parent_root)) {
7369 btrfs_root_dec_send_in_progress(sctx->parent_root);
7370 btrfs_put_root(sctx->parent_root);
7371 }
7373 kvfree(clone_sources_tmp);
7375 if (sctx) {
7376 if (sctx->send_filp)
7377 fput(sctx->send_filp);
7379 kvfree(sctx->clone_roots);
7380 kvfree(sctx->send_buf);
7381 kvfree(sctx->read_buf);
7383 name_cache_free(sctx);
7385 kfree(sctx);
7386 }
7388 return ret;
7389 }