// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"

#include "btree_update.h"
#include "btree_update_interior.h"
#include "buckets.h"
#include "debug.h"
#include "extents.h"
#include "extent_update.h"
11 * This counts the number of iterators to the alloc & ec btrees we'll need
12 * inserting/removing this extent:
14 static unsigned bch2_bkey_nr_alloc_ptrs(struct bkey_s_c k
)
16 struct bkey_ptrs_c ptrs
= bch2_bkey_ptrs_c(k
);
17 const union bch_extent_entry
*entry
;
18 unsigned ret
= 0, lru
= 0;
20 bkey_extent_entry_for_each(ptrs
, entry
) {
21 switch (__extent_entry_type(entry
)) {
22 case BCH_EXTENT_ENTRY_ptr
:
23 /* Might also be updating LRU btree */
24 if (entry
->ptr
.cached
)
28 case BCH_EXTENT_ENTRY_stripe_ptr
:
34 * Updating keys in the alloc btree may also update keys in the
35 * freespace or discard btrees:
40 static int count_iters_for_insert(struct btree_trans
*trans
,
47 int ret
= 0, ret2
= 0;
49 if (*nr_iters
>= max_iters
) {
50 *end
= bpos_min(*end
, k
.k
->p
);
56 case KEY_TYPE_reflink_v
:
57 *nr_iters
+= bch2_bkey_nr_alloc_ptrs(k
);
59 if (*nr_iters
>= max_iters
) {
60 *end
= bpos_min(*end
, k
.k
->p
);
65 case KEY_TYPE_reflink_p
: {
66 struct bkey_s_c_reflink_p p
= bkey_s_c_to_reflink_p(k
);
67 u64 idx
= le64_to_cpu(p
.v
->idx
);
68 unsigned sectors
= bpos_min(*end
, p
.k
->p
).offset
-
69 bkey_start_offset(p
.k
);
70 struct btree_iter iter
;
73 for_each_btree_key_norestart(trans
, iter
,
74 BTREE_ID_reflink
, POS(0, idx
+ offset
),
75 BTREE_ITER_slots
, r_k
, ret2
) {
76 if (bkey_ge(bkey_start_pos(r_k
.k
), POS(0, idx
+ sectors
)))
79 /* extent_update_to_keys(), for the reflink_v update */
82 *nr_iters
+= 1 + bch2_bkey_nr_alloc_ptrs(r_k
);
84 if (*nr_iters
>= max_iters
) {
85 struct bpos pos
= bkey_start_pos(k
.k
);
86 pos
.offset
+= min_t(u64
, k
.k
->size
,
87 r_k
.k
->p
.offset
- idx
);
89 *end
= bpos_min(*end
, pos
);
94 bch2_trans_iter_exit(trans
, &iter
);
103 #define EXTENT_ITERS_MAX (BTREE_ITER_INITIAL / 3)
105 int bch2_extent_atomic_end(struct btree_trans
*trans
,
106 struct btree_iter
*iter
,
107 struct bkey_i
*insert
,
110 struct btree_iter copy
;
112 unsigned nr_iters
= 0;
115 ret
= bch2_btree_iter_traverse(iter
);
121 /* extent_update_to_keys(): */
124 ret
= count_iters_for_insert(trans
, bkey_i_to_s_c(insert
), 0, end
,
125 &nr_iters
, EXTENT_ITERS_MAX
/ 2);
129 bch2_trans_copy_iter(©
, iter
);
131 for_each_btree_key_upto_continue_norestart(copy
, insert
->k
.p
, 0, k
, ret
) {
134 if (bkey_gt(bkey_start_pos(&insert
->k
), bkey_start_pos(k
.k
)))
135 offset
= bkey_start_offset(&insert
->k
) -
136 bkey_start_offset(k
.k
);
138 /* extent_handle_overwrites(): */
139 switch (bch2_extent_overlap(&insert
->k
, k
.k
)) {
140 case BCH_EXTENT_OVERLAP_ALL
:
141 case BCH_EXTENT_OVERLAP_FRONT
:
144 case BCH_EXTENT_OVERLAP_BACK
:
145 case BCH_EXTENT_OVERLAP_MIDDLE
:
150 ret
= count_iters_for_insert(trans
, k
, offset
, end
,
151 &nr_iters
, EXTENT_ITERS_MAX
);
156 bch2_trans_iter_exit(trans
, ©
);
157 return ret
< 0 ? ret
: 0;
160 int bch2_extent_trim_atomic(struct btree_trans
*trans
,
161 struct btree_iter
*iter
,
167 ret
= bch2_extent_atomic_end(trans
, iter
, k
, &end
);
171 bch2_cut_back(end
, k
);