// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_buf.h"
#include "btree_update.h"
#include "buckets.h"
#include "compress.h"
#include "data_update.h"
#include "disk_groups.h"
#include "ec.h"
#include "error.h"
#include "extents.h"
#include "io_write.h"
#include "keylist.h"
#include "move.h"
#include "nocow_locking.h"
#include "rebalance.h"
#include "snapshot.h"
#include "subvolume.h"
#include "trace.h"
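
/*
 * Device refcount helpers: a data update pins every device its key points
 * into, so the bch_dev can't go away while IO is in flight; if we can't get
 * all the refs, the partial set is unwound.
 */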

static void bkey_put_dev_refs(struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);

	bkey_for_each_ptr(ptrs, ptr)
		bch2_dev_put(bch2_dev_have_ref(c, ptr->dev));
}

static bool bkey_get_dev_refs(struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);

	bkey_for_each_ptr(ptrs, ptr) {
		if (!bch2_dev_tryget(c, ptr->dev)) {
			bkey_for_each_ptr(ptrs, ptr2) {
				if (ptr2 == ptr)
					break;
				bch2_dev_put(bch2_dev_have_ref(c, ptr2->dev));
			}
			return false;
		}
	}
	return true;
}
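
/*
 * Nocow bucket locks: nocow extents are updated in place, so other writes to
 * the buckets a key points into must be held off for the duration of the
 * update. With a moving_context we can wait: trylock until our in-flight IOs
 * have drained, then take the lock blocking; without one we only trylock,
 * unwinding and bailing out on failure.
 */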

static void bkey_nocow_unlock(struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);

	bkey_for_each_ptr(ptrs, ptr) {
		struct bch_dev *ca = bch2_dev_have_ref(c, ptr->dev);
		struct bpos bucket = PTR_BUCKET_POS(ca, ptr);

		bch2_bucket_nocow_unlock(&c->nocow_locks, bucket, 0);
	}
}

static bool bkey_nocow_lock(struct bch_fs *c, struct moving_context *ctxt, struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);

	bkey_for_each_ptr(ptrs, ptr) {
		struct bch_dev *ca = bch2_dev_have_ref(c, ptr->dev);
		struct bpos bucket = PTR_BUCKET_POS(ca, ptr);

		if (ctxt) {
			bool locked;

			move_ctxt_wait_event(ctxt,
				(locked = bch2_bucket_nocow_trylock(&c->nocow_locks, bucket, 0)) ||
				list_empty(&ctxt->ios));

			if (!locked)
				bch2_bucket_nocow_lock(&c->nocow_locks, bucket, 0);
		} else {
			if (!bch2_bucket_nocow_trylock(&c->nocow_locks, bucket, 0)) {
				bkey_for_each_ptr(ptrs, ptr2) {
					if (ptr2 == ptr)
						break;

					ca = bch2_dev_have_ref(c, ptr2->dev);
					bucket = PTR_BUCKET_POS(ca, ptr2);
					bch2_bucket_nocow_unlock(&c->nocow_locks, bucket, 0);
				}
				return false;
			}
		}
	}
	return true;
}

static void trace_move_extent_finish2(struct bch_fs *c, struct bkey_s_c k)
{
	if (trace_move_extent_finish_enabled()) {
		struct printbuf buf = PRINTBUF;

		bch2_bkey_val_to_text(&buf, c, k);
		trace_move_extent_finish(c, buf.buf);
		printbuf_exit(&buf);
	}
}

static void trace_move_extent_fail2(struct data_update *m,
				    struct bkey_s_c new,
				    struct bkey_s_c wrote,
				    struct bkey_i *insert,
				    const char *msg)
{
	struct bch_fs *c = m->op.c;
	struct bkey_s_c old = bkey_i_to_s_c(m->k.k);
	const union bch_extent_entry *entry;
	struct bch_extent_ptr *ptr;
	struct extent_ptr_decoded p;
	struct printbuf buf = PRINTBUF;
	unsigned i, rewrites_found = 0;

	if (!trace_move_extent_fail_enabled())
		return;

	prt_str(&buf, msg);

	if (insert) {
		i = 0;
		bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs_c(old), p, entry) {
			if (((1U << i) & m->data_opts.rewrite_ptrs) &&
			    (ptr = bch2_extent_has_ptr(old, p, bkey_i_to_s(insert))) &&
			    !ptr->cached)
				rewrites_found |= 1U << i;
			i++;
		}
	}

	prt_printf(&buf, "\nrewrite ptrs: %u%u%u%u",
		   (m->data_opts.rewrite_ptrs & (1 << 0)) != 0,
		   (m->data_opts.rewrite_ptrs & (1 << 1)) != 0,
		   (m->data_opts.rewrite_ptrs & (1 << 2)) != 0,
		   (m->data_opts.rewrite_ptrs & (1 << 3)) != 0);

	prt_printf(&buf, "\nrewrites found: %u%u%u%u",
		   (rewrites_found & (1 << 0)) != 0,
		   (rewrites_found & (1 << 1)) != 0,
		   (rewrites_found & (1 << 2)) != 0,
		   (rewrites_found & (1 << 3)) != 0);

	prt_str(&buf, "\nold: ");
	bch2_bkey_val_to_text(&buf, c, old);

	prt_str(&buf, "\nnew: ");
	bch2_bkey_val_to_text(&buf, c, new);

	prt_str(&buf, "\nwrote: ");
	bch2_bkey_val_to_text(&buf, c, wrote);

	if (insert) {
		prt_str(&buf, "\ninsert: ");
		bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(insert));
	}

	trace_move_extent_fail(c, buf.buf);
	printbuf_exit(&buf);
}
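
/*
 * The index-update half of a data update: for each key produced by the write
 * op, check that the extent we read from still matches what's in the btree
 * (we may have raced with another update), then build the key we'll insert:
 * demote the pointers we rewrote to cached, drop replicas that conflict with
 * or duplicate what we just wrote, demote excess replicas to cached, append
 * the newly written pointers, and commit - retrying on transaction restart,
 * and counting it as a race and skipping ahead if the extent changed under
 * us.
 */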

static int __bch2_data_update_index_update(struct btree_trans *trans,
					   struct bch_write_op *op)
{
	struct bch_fs *c = op->c;
	struct btree_iter iter;
	struct data_update *m =
		container_of(op, struct data_update, op);
	struct keylist *keys = &op->insert_keys;
	struct bkey_buf _new, _insert;
	int ret = 0;

	bch2_bkey_buf_init(&_new);
	bch2_bkey_buf_init(&_insert);
	bch2_bkey_buf_realloc(&_insert, c, U8_MAX);

	bch2_trans_iter_init(trans, &iter, m->btree_id,
			     bkey_start_pos(&bch2_keylist_front(keys)->k),
			     BTREE_ITER_slots|BTREE_ITER_intent);

	while (1) {
		struct bkey_s_c k;
		struct bkey_s_c old = bkey_i_to_s_c(m->k.k);
		struct bkey_i *insert = NULL;
		struct bkey_i_extent *new;
		const union bch_extent_entry *entry_c;
		union bch_extent_entry *entry;
		struct extent_ptr_decoded p;
		struct bch_extent_ptr *ptr;
		const struct bch_extent_ptr *ptr_c;
		struct bpos next_pos;
		bool should_check_enospc;
		s64 i_sectors_delta = 0, disk_sectors_delta = 0;
		unsigned rewrites_found = 0, durability, i;

		bch2_trans_begin(trans);

		k = bch2_btree_iter_peek_slot(&iter);
		ret = bkey_err(k);
		if (ret)
			goto err;

		new = bkey_i_to_extent(bch2_keylist_front(keys));

		if (!bch2_extents_match(k, old)) {
			trace_move_extent_fail2(m, k, bkey_i_to_s_c(&new->k_i),
						NULL, "no match:");
			goto nowork;
		}

		bkey_reassemble(_insert.k, k);
		insert = _insert.k;

		bch2_bkey_buf_copy(&_new, c, bch2_keylist_front(keys));
		new = bkey_i_to_extent(_new.k);
		bch2_cut_front(iter.pos, &new->k_i);

		bch2_cut_front(iter.pos, insert);
		bch2_cut_back(new->k.p, insert);
		bch2_cut_back(insert->k.p, &new->k_i);

		/*
		 * @old: extent that we read from
		 * @insert: key that we're going to update, initialized from
		 * extent currently in btree - same as @old unless we raced with
		 * other updates
		 * @new: extent with new pointers that we'll be adding to @insert
		 *
		 * First, drop rewrite_ptrs from @new:
		 */
		i = 0;
		bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs_c(old), p, entry_c) {
			if (((1U << i) & m->data_opts.rewrite_ptrs) &&
			    (ptr = bch2_extent_has_ptr(old, p, bkey_i_to_s(insert))) &&
			    !ptr->cached) {
				bch2_extent_ptr_set_cached(c, &m->op.opts,
							   bkey_i_to_s(insert), ptr);
				rewrites_found |= 1U << i;
			}
			i++;
		}

		if (m->data_opts.rewrite_ptrs &&
		    !rewrites_found &&
		    bch2_bkey_durability(c, k) >= m->op.opts.data_replicas) {
			trace_move_extent_fail2(m, k, bkey_i_to_s_c(&new->k_i), insert, "no rewrites found:");
			goto nowork;
		}

		/*
		 * A replica that we just wrote might conflict with a replica
		 * that we want to keep, due to racing with another move:
		 */
restart_drop_conflicting_replicas:
		extent_for_each_ptr(extent_i_to_s(new), ptr)
			if ((ptr_c = bch2_bkey_has_device_c(bkey_i_to_s_c(insert), ptr->dev)) &&
			    !ptr_c->cached) {
				bch2_bkey_drop_ptr_noerror(bkey_i_to_s(&new->k_i), ptr);
				goto restart_drop_conflicting_replicas;
			}

		if (!bkey_val_u64s(&new->k)) {
			trace_move_extent_fail2(m, k, bkey_i_to_s_c(&new->k_i), insert, "new replicas conflicted:");
			goto nowork;
		}

		/* Now, drop pointers that conflict with what we just wrote: */
		extent_for_each_ptr_decode(extent_i_to_s(new), p, entry)
			if ((ptr = bch2_bkey_has_device(bkey_i_to_s(insert), p.ptr.dev)))
				bch2_bkey_drop_ptr_noerror(bkey_i_to_s(insert), ptr);

		durability = bch2_bkey_durability(c, bkey_i_to_s_c(insert)) +
			bch2_bkey_durability(c, bkey_i_to_s_c(&new->k_i));
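
		/*
		 * At this point durability counts everything we're keeping in
		 * @insert plus everything in @new; a replica can be demoted to
		 * cached as long as the durability that remains still meets
		 * opts.data_replicas:
		 */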

		/* Now, drop excess replicas: */
		rcu_read_lock();
restart_drop_extra_replicas:
		bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs(bkey_i_to_s(insert)), p, entry) {
			unsigned ptr_durability = bch2_extent_ptr_durability(c, &p);

			if (!p.ptr.cached &&
			    durability - ptr_durability >= m->op.opts.data_replicas) {
				durability -= ptr_durability;

				bch2_extent_ptr_set_cached(c, &m->op.opts,
							   bkey_i_to_s(insert), &entry->ptr);
				goto restart_drop_extra_replicas;
			}
		}
		rcu_read_unlock();

		/* Finally, add the pointers we just wrote: */
		extent_for_each_ptr_decode(extent_i_to_s(new), p, entry)
			bch2_extent_ptr_decoded_append(insert, &p);

		bch2_bkey_narrow_crcs(insert, (struct bch_extent_crc_unpacked) { 0 });
		bch2_extent_normalize_by_opts(c, &m->op.opts, bkey_i_to_s(insert));

		ret = bch2_sum_sector_overwrites(trans, &iter, insert,
						 &should_check_enospc,
						 &i_sectors_delta,
						 &disk_sectors_delta);
		if (ret)
			goto err;

		if (disk_sectors_delta > (s64) op->res.sectors) {
			ret = bch2_disk_reservation_add(c, &op->res,
						disk_sectors_delta - op->res.sectors,
						!should_check_enospc
						? BCH_DISK_RESERVATION_NOFAIL : 0);
			if (ret)
				goto out;
		}

		next_pos = insert->k.p;

		/*
		 * Check for nonce offset inconsistency:
		 * This is debug code - we've been seeing this bug rarely, and
		 * it's been hard to reproduce, so this should give us some more
		 * information when it does occur:
		 */
		int invalid = bch2_bkey_validate(c, bkey_i_to_s_c(insert), __btree_node_type(0, m->btree_id),
						 BCH_VALIDATE_commit);
		if (invalid) {
			struct printbuf buf = PRINTBUF;

			prt_str(&buf, "about to insert invalid key in data update path");
			prt_str(&buf, "\nold: ");
			bch2_bkey_val_to_text(&buf, c, old);
			prt_str(&buf, "\nk: ");
			bch2_bkey_val_to_text(&buf, c, k);
			prt_str(&buf, "\nnew: ");
			bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(insert));

			bch2_print_string_as_lines(KERN_ERR, buf.buf);
			printbuf_exit(&buf);

			bch2_fatal_error(c);
			ret = -EIO;
			goto out;
		}

		if (trace_data_update_enabled()) {
			struct printbuf buf = PRINTBUF;

			prt_str(&buf, "\nold: ");
			bch2_bkey_val_to_text(&buf, c, old);
			prt_str(&buf, "\nk: ");
			bch2_bkey_val_to_text(&buf, c, k);
			prt_str(&buf, "\nnew: ");
			bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(insert));

			trace_data_update(c, buf.buf);
			printbuf_exit(&buf);
		}

		ret =   bch2_insert_snapshot_whiteouts(trans, m->btree_id,
						k.k->p, bkey_start_pos(&insert->k)) ?:
			bch2_insert_snapshot_whiteouts(trans, m->btree_id,
						k.k->p, insert->k.p) ?:
			bch2_bkey_set_needs_rebalance(c, insert, &op->opts) ?:
			bch2_trans_update(trans, &iter, insert,
				BTREE_UPDATE_internal_snapshot_node) ?:
			bch2_trans_commit(trans, &op->res,
				NULL,
				BCH_TRANS_COMMIT_no_check_rw|
				BCH_TRANS_COMMIT_no_enospc|
				m->data_opts.btree_insert_flags);
		if (!ret) {
			bch2_btree_iter_set_pos(&iter, next_pos);

			this_cpu_add(c->counters[BCH_COUNTER_move_extent_finish], new->k.size);
			trace_move_extent_finish2(c, bkey_i_to_s_c(&new->k_i));
		}
err:
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			ret = 0;
		if (ret)
			break;
next:
		while (bkey_ge(iter.pos, bch2_keylist_front(keys)->k.p)) {
			bch2_keylist_pop_front(keys);
			if (bch2_keylist_empty(keys))
				goto out;
		}
		continue;
nowork:
		if (m->stats) {
			BUG_ON(k.k->p.offset <= iter.pos.offset);
			atomic64_inc(&m->stats->keys_raced);
			atomic64_add(k.k->p.offset - iter.pos.offset,
				     &m->stats->sectors_raced);
		}

		count_event(c, move_extent_fail);

		bch2_btree_iter_advance(&iter);
		goto next;
	}
out:
	bch2_trans_iter_exit(trans, &iter);
	bch2_bkey_buf_exit(&_insert, c);
	bch2_bkey_buf_exit(&_new, c);
	BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart));
	return ret;
}

int bch2_data_update_index_update(struct bch_write_op *op)
{
	return bch2_trans_run(op->c, __bch2_data_update_index_update(trans, op));
}
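
/*
 * Called when the read side of a data update completes: the data is already
 * in the write bio's pages, so hand the read's checksum state to the write
 * op and kick off the write.
 */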

void bch2_data_update_read_done(struct data_update *m,
				struct bch_extent_crc_unpacked crc)
{
	/* write bio must own pages: */
	BUG_ON(!m->op.wbio.bio.bi_vcnt);

	m->op.crc = crc;
	m->op.wbio.bio.bi_iter.bi_size = crc.compressed_size << 9;

	closure_call(&m->op.cl, bch2_write, NULL, NULL);
}

void bch2_data_update_exit(struct data_update *update)
{
	struct bch_fs *c = update->op.c;
	struct bkey_s_c k = bkey_i_to_s_c(update->k.k);

	if (c->opts.nocow_enabled)
		bkey_nocow_unlock(c, k);
	bkey_put_dev_refs(c, k);
	bch2_bkey_buf_exit(&update->k, c);
	bch2_disk_reservation_put(c, &update->op.res);
	bch2_bio_free_pages_pool(c, &update->op.wbio.bio);
}
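
/*
 * Unwritten extents have no data to copy: just allocate new space and insert
 * keys, with the unwritten flag set, matching the extent being moved.
 */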

static void bch2_update_unwritten_extent(struct btree_trans *trans,
					 struct data_update *update)
{
	struct bch_fs *c = update->op.c;
	struct bio *bio = &update->op.wbio.bio;
	struct bkey_i_extent *e;
	struct write_point *wp;
	struct closure cl;
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	closure_init_stack(&cl);
	bch2_keylist_init(&update->op.insert_keys, update->op.inline_keys);

	while (bio_sectors(bio)) {
		unsigned sectors = bio_sectors(bio);

		bch2_trans_begin(trans);

		bch2_trans_iter_init(trans, &iter, update->btree_id, update->op.pos,
				     BTREE_ITER_slots);
		ret = lockrestart_do(trans, ({
			k = bch2_btree_iter_peek_slot(&iter);
			bkey_err(k);
		}));
		bch2_trans_iter_exit(trans, &iter);

		if (ret || !bch2_extents_match(k, bkey_i_to_s_c(update->k.k)))
			break;

		e = bkey_extent_init(update->op.insert_keys.top);
		e->k.p = update->op.pos;

		ret = bch2_alloc_sectors_start_trans(trans,
				update->op.target,
				false,
				update->op.write_point,
				&update->op.devs_have,
				update->op.nr_replicas,
				update->op.nr_replicas,
				update->op.watermark,
				0, &cl, &wp);
		if (bch2_err_matches(ret, BCH_ERR_operation_blocked)) {
			bch2_trans_unlock(trans);
			closure_sync(&cl);
			continue;
		}

		bch_err_fn_ratelimited(c, ret);

		if (ret)
			return;

		sectors = min(sectors, wp->sectors_free);

		bch2_key_resize(&e->k, sectors);

		bch2_open_bucket_get(c, wp, &update->op.open_buckets);
		bch2_alloc_sectors_append_ptrs(c, wp, &e->k_i, sectors, false);
		bch2_alloc_sectors_done(c, wp);

		bio_advance(bio, sectors << 9);
		update->op.pos.offset += sectors;

		extent_for_each_ptr(extent_i_to_s(e), ptr)
			ptr->unwritten = true;
		bch2_keylist_push(&update->op.insert_keys);

		ret = __bch2_data_update_index_update(trans, &update->op);

		bch2_open_buckets_put(c, &update->op.open_buckets);

		if (ret)
			break;
	}

	if (closure_nr_remaining(&cl) != 1) {
		bch2_trans_unlock(trans);
		closure_sync(&cl);
	}
}

void bch2_data_update_opts_to_text(struct printbuf *out, struct bch_fs *c,
				   struct bch_io_opts *io_opts,
				   struct data_update_opts *data_opts)
{
	printbuf_tabstop_push(out, 20);
	prt_str(out, "rewrite ptrs:\t");
	bch2_prt_u64_base2(out, data_opts->rewrite_ptrs);
	prt_newline(out);

	prt_str(out, "kill ptrs:\t");
	bch2_prt_u64_base2(out, data_opts->kill_ptrs);
	prt_newline(out);

	prt_str(out, "target:\t");
	bch2_target_to_text(out, c, data_opts->target);
	prt_newline(out);

	prt_str(out, "compression:\t");
	bch2_compression_opt_to_text(out, background_compression(*io_opts));
	prt_newline(out);

	prt_str(out, "opts.replicas:\t");
	prt_u64(out, io_opts->data_replicas);
	prt_newline(out);

	prt_str(out, "extra replicas:\t");
	prt_u64(out, data_opts->extra_replicas);
}

void bch2_data_update_to_text(struct printbuf *out, struct data_update *m)
{
	bch2_bkey_val_to_text(out, m->op.c, bkey_i_to_s_c(m->k.k));
	prt_newline(out);
	bch2_data_update_opts_to_text(out, m->op.c, &m->op.opts, &m->data_opts);
}
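
/*
 * Drop the pointers selected by data_opts->kill_ptrs, a bitmask indexing
 * @k's pointers in key order: e.g. kill_ptrs == 0b101 drops the first and
 * third pointers. This is the degenerate data update, where nothing is
 * rewritten and no data moves.
 */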

int bch2_extent_drop_ptrs(struct btree_trans *trans,
			  struct btree_iter *iter,
			  struct bkey_s_c k,
			  struct bch_io_opts *io_opts,
			  struct data_update_opts *data_opts)
{
	struct bch_fs *c = trans->c;
	struct bkey_i *n;
	int ret;

	n = bch2_bkey_make_mut_noupdate(trans, k);
	ret = PTR_ERR_OR_ZERO(n);
	if (ret)
		return ret;

	while (data_opts->kill_ptrs) {
		unsigned i = 0, drop = __fls(data_opts->kill_ptrs);

		bch2_bkey_drop_ptrs_noerror(bkey_i_to_s(n), ptr, i++ == drop);
		data_opts->kill_ptrs ^= 1U << drop;
	}

	/*
	 * If the new extent no longer has any pointers, bch2_extent_normalize()
	 * will do the appropriate thing with it (turning it into a
	 * KEY_TYPE_error key, or just a discard if it was a cached extent)
	 */
	bch2_extent_normalize_by_opts(c, io_opts, bkey_i_to_s(n));

	/*
	 * Since we're not inserting through an extent iterator
	 * (BTREE_ITER_all_snapshots iterators aren't extent iterators),
	 * we aren't using the extent overwrite path to delete, we're
	 * just using the normal key deletion path:
	 */
	if (bkey_deleted(&n->k) && !(iter->flags & BTREE_ITER_is_extents))
		n->k.size = 0;

	return bch2_trans_relock(trans) ?:
		bch2_trans_update(trans, iter, n, BTREE_UPDATE_internal_snapshot_node) ?:
		bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
}
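
/*
 * Set up a data update: take device refs and nocow locks, initialize the
 * write op, and derive the number of new replicas to write from the
 * durability of the pointers being rewritten vs. those being kept. If it
 * turns out no new replicas are needed, this degrades to
 * bch2_extent_drop_ptrs(); unwritten extents take their own path, since
 * there is no data to read or write.
 */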

int bch2_data_update_init(struct btree_trans *trans,
			  struct btree_iter *iter,
			  struct moving_context *ctxt,
			  struct data_update *m,
			  struct write_point_specifier wp,
			  struct bch_io_opts io_opts,
			  struct data_update_opts data_opts,
			  enum btree_id btree_id,
			  struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	unsigned i, reserve_sectors = k.k->size * data_opts.extra_replicas;
	int ret = 0;

	/*
	 * If the fs is corrupt, we may have a key for a snapshot node that
	 * doesn't exist, and we have to check for this because we go rw
	 * before repairing the snapshots table - just skip it, we can move
	 * it later.
	 */
	if (unlikely(k.k->p.snapshot && !bch2_snapshot_equiv(c, k.k->p.snapshot)))
		return -BCH_ERR_data_update_done;

	if (!bkey_get_dev_refs(c, k))
		return -BCH_ERR_data_update_done;

	if (c->opts.nocow_enabled &&
	    !bkey_nocow_lock(c, ctxt, k)) {
		bkey_put_dev_refs(c, k);
		return -BCH_ERR_nocow_lock_blocked;
	}

	bch2_bkey_buf_init(&m->k);
	bch2_bkey_buf_reassemble(&m->k, c, k);
	m->btree_id = btree_id;
	m->data_opts = data_opts;
	m->ctxt = ctxt;
	m->stats = ctxt ? ctxt->stats : NULL;

	bch2_write_op_init(&m->op, c, io_opts);
	m->op.pos = bkey_start_pos(k.k);
	m->op.version = k.k->bversion;
	m->op.target = data_opts.target;
	m->op.write_point = wp;
	m->op.nr_replicas = 0;
	m->op.flags |= BCH_WRITE_PAGES_STABLE|
		BCH_WRITE_PAGES_OWNED|
		BCH_WRITE_DATA_ENCODED|
		BCH_WRITE_MOVE|
		m->data_opts.write_flags;
	m->op.compression_opt = background_compression(io_opts);
	m->op.watermark = m->data_opts.btree_insert_flags & BCH_WATERMARK_MASK;

	unsigned durability_have = 0, durability_removing = 0;

	i = 0;
	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		if (!p.ptr.cached) {
			rcu_read_lock();
			if (BIT(i) & m->data_opts.rewrite_ptrs) {
				if (crc_is_compressed(p.crc))
					reserve_sectors += k.k->size;

				m->op.nr_replicas += bch2_extent_ptr_desired_durability(c, &p);
				durability_removing += bch2_extent_ptr_desired_durability(c, &p);
			} else if (!(BIT(i) & m->data_opts.kill_ptrs)) {
				bch2_dev_list_add_dev(&m->op.devs_have, p.ptr.dev);
				durability_have += bch2_extent_ptr_durability(c, &p);
			}
			rcu_read_unlock();
		}

		/*
		 * op->csum_type is normally initialized from the fs/file's
		 * current options - but if an extent is encrypted, we require
		 * that it stays encrypted:
		 */
		if (bch2_csum_type_is_encryption(p.crc.csum_type)) {
			m->op.nonce = p.crc.nonce + p.crc.offset;
			m->op.csum_type = p.crc.csum_type;
		}

		if (p.crc.compression_type == BCH_COMPRESSION_TYPE_incompressible)
			m->op.incompressible = true;

		i++;
	}

	unsigned durability_required = max(0, (int) (io_opts.data_replicas - durability_have));

	/*
	 * If current extent durability is less than io_opts.data_replicas,
	 * we're not trying to rereplicate the extent up to data_replicas here -
	 * unless extra_replicas was specified
	 *
	 * Increasing replication is an explicit operation triggered by
	 * rereplicate, currently, so that users don't get an unexpected -ENOSPC
	 */
	m->op.nr_replicas = min(durability_removing, durability_required) +
		m->data_opts.extra_replicas;
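
	/*
	 * Worked example: with io_opts.data_replicas = 2, one pointer being
	 * rewritten with durability 1 and one pointer being kept with
	 * durability 1, we get durability_have = 1, durability_removing = 1,
	 * durability_required = 2 - 1 = 1, so nr_replicas = min(1, 1) = 1:
	 * one new replica to replace the one being dropped, no rereplication.
	 */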

	/*
	 * If device(s) were set to durability=0 after data was written to them
	 * we can end up with a durability=0 extent, and the normal algorithm
	 * that tries not to increase durability doesn't work:
	 */
	if (!(durability_have + durability_removing))
		m->op.nr_replicas = max((unsigned) m->op.nr_replicas, 1);

	m->op.nr_replicas_required = m->op.nr_replicas;

	/*
	 * It might turn out that we don't need any new replicas, if the
	 * replicas or durability settings have been changed since the extent
	 * was written:
	 */
	if (!m->op.nr_replicas) {
		m->data_opts.kill_ptrs |= m->data_opts.rewrite_ptrs;
		m->data_opts.rewrite_ptrs = 0;
		/* if iter == NULL, it's just a promote */
		if (iter)
			ret = bch2_extent_drop_ptrs(trans, iter, k, &io_opts, &m->data_opts);
		goto out;
	}

	if (reserve_sectors) {
		ret = bch2_disk_reservation_add(c, &m->op.res, reserve_sectors,
				m->data_opts.extra_replicas
				? 0
				: BCH_DISK_RESERVATION_NOFAIL);
		if (ret)
			goto out;
	}

	if (bkey_extent_is_unwritten(k)) {
		bch2_update_unwritten_extent(trans, m);
		goto out;
	}

	return 0;
out:
	bch2_data_update_exit(m);
	return ret ?: -BCH_ERR_data_update_done;
}
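
/*
 * Rewriting a cached pointer would just produce another cached copy, so
 * convert requested rewrites of cached pointers into drops:
 */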

void bch2_data_update_opts_normalize(struct bkey_s_c k, struct data_update_opts *opts)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	unsigned i = 0;

	bkey_for_each_ptr(ptrs, ptr) {
		if ((opts->rewrite_ptrs & (1U << i)) && ptr->cached) {
			opts->kill_ptrs |= 1U << i;
			opts->rewrite_ptrs ^= 1U << i;
		}

		i++;
	}
}