printf: Remove unused 'bprintf'
[drm/drm-misc.git] / fs / bcachefs / extent_update.c
blob5f4fecb358da03e0b64c95c12ce174bc29beb24b
1 // SPDX-License-Identifier: GPL-2.0
2 #include "bcachefs.h"
3 #include "btree_update.h"
4 #include "btree_update_interior.h"
5 #include "buckets.h"
6 #include "debug.h"
7 #include "extents.h"
8 #include "extent_update.h"
11 * This counts the number of iterators to the alloc & ec btrees we'll need
12 * inserting/removing this extent:
14 static unsigned bch2_bkey_nr_alloc_ptrs(struct bkey_s_c k)
16 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
17 const union bch_extent_entry *entry;
18 unsigned ret = 0, lru = 0;
20 bkey_extent_entry_for_each(ptrs, entry) {
21 switch (__extent_entry_type(entry)) {
22 case BCH_EXTENT_ENTRY_ptr:
23 /* Might also be updating LRU btree */
24 if (entry->ptr.cached)
25 lru++;
27 fallthrough;
28 case BCH_EXTENT_ENTRY_stripe_ptr:
29 ret++;
34 * Updating keys in the alloc btree may also update keys in the
35 * freespace or discard btrees:
37 return lru + ret * 2;
40 static int count_iters_for_insert(struct btree_trans *trans,
41 struct bkey_s_c k,
42 unsigned offset,
43 struct bpos *end,
44 unsigned *nr_iters,
45 unsigned max_iters)
47 int ret = 0, ret2 = 0;
49 if (*nr_iters >= max_iters) {
50 *end = bpos_min(*end, k.k->p);
51 ret = 1;
54 switch (k.k->type) {
55 case KEY_TYPE_extent:
56 case KEY_TYPE_reflink_v:
57 *nr_iters += bch2_bkey_nr_alloc_ptrs(k);
59 if (*nr_iters >= max_iters) {
60 *end = bpos_min(*end, k.k->p);
61 ret = 1;
64 break;
65 case KEY_TYPE_reflink_p: {
66 struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
67 u64 idx = le64_to_cpu(p.v->idx);
68 unsigned sectors = bpos_min(*end, p.k->p).offset -
69 bkey_start_offset(p.k);
70 struct btree_iter iter;
71 struct bkey_s_c r_k;
73 for_each_btree_key_norestart(trans, iter,
74 BTREE_ID_reflink, POS(0, idx + offset),
75 BTREE_ITER_slots, r_k, ret2) {
76 if (bkey_ge(bkey_start_pos(r_k.k), POS(0, idx + sectors)))
77 break;
79 /* extent_update_to_keys(), for the reflink_v update */
80 *nr_iters += 1;
82 *nr_iters += 1 + bch2_bkey_nr_alloc_ptrs(r_k);
84 if (*nr_iters >= max_iters) {
85 struct bpos pos = bkey_start_pos(k.k);
86 pos.offset += min_t(u64, k.k->size,
87 r_k.k->p.offset - idx);
89 *end = bpos_min(*end, pos);
90 ret = 1;
91 break;
94 bch2_trans_iter_exit(trans, &iter);
96 break;
100 return ret2 ?: ret;
/* Budget of btree iterators a single extent update may consume: */
#define EXTENT_ITERS_MAX (BTREE_ITER_INITIAL / 3)
105 int bch2_extent_atomic_end(struct btree_trans *trans,
106 struct btree_iter *iter,
107 struct bkey_i *insert,
108 struct bpos *end)
110 struct btree_iter copy;
111 struct bkey_s_c k;
112 unsigned nr_iters = 0;
113 int ret;
115 ret = bch2_btree_iter_traverse(iter);
116 if (ret)
117 return ret;
119 *end = insert->k.p;
121 /* extent_update_to_keys(): */
122 nr_iters += 1;
124 ret = count_iters_for_insert(trans, bkey_i_to_s_c(insert), 0, end,
125 &nr_iters, EXTENT_ITERS_MAX / 2);
126 if (ret < 0)
127 return ret;
129 bch2_trans_copy_iter(&copy, iter);
131 for_each_btree_key_upto_continue_norestart(copy, insert->k.p, 0, k, ret) {
132 unsigned offset = 0;
134 if (bkey_gt(bkey_start_pos(&insert->k), bkey_start_pos(k.k)))
135 offset = bkey_start_offset(&insert->k) -
136 bkey_start_offset(k.k);
138 /* extent_handle_overwrites(): */
139 switch (bch2_extent_overlap(&insert->k, k.k)) {
140 case BCH_EXTENT_OVERLAP_ALL:
141 case BCH_EXTENT_OVERLAP_FRONT:
142 nr_iters += 1;
143 break;
144 case BCH_EXTENT_OVERLAP_BACK:
145 case BCH_EXTENT_OVERLAP_MIDDLE:
146 nr_iters += 2;
147 break;
150 ret = count_iters_for_insert(trans, k, offset, end,
151 &nr_iters, EXTENT_ITERS_MAX);
152 if (ret)
153 break;
156 bch2_trans_iter_exit(trans, &copy);
157 return ret < 0 ? ret : 0;
160 int bch2_extent_trim_atomic(struct btree_trans *trans,
161 struct btree_iter *iter,
162 struct bkey_i *k)
164 struct bpos end;
165 int ret;
167 ret = bch2_extent_atomic_end(trans, iter, k, &end);
168 if (ret)
169 return ret;
171 bch2_cut_back(end, k);
172 return 0;