// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "buckets.h"
#include "disk_accounting.h"
#include "journal.h"
#include "replicas.h"
#include "super-io.h"

#include <linux/sort.h>

static int bch2_cpu_replicas_to_sb_replicas(struct bch_fs *,
                                            struct bch_replicas_cpu *);

/* Some (buggy!) compilers don't allow memcmp to be passed as a pointer */
static int bch2_memcmp(const void *l, const void *r, const void *priv)
{
        size_t size = (size_t) priv;
        return memcmp(l, r, size);
}

/* Replicas tracking - in memory: */

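/*
 * A replicas entry (struct bch_replicas_entry_v1) records a data type, the
 * number of devices the data lives on (nr_devs, with the device indices kept
 * sorted in devs[]), and nr_required - how many of those devices must be
 * online for the data to be available.
 *
 * struct bch_replicas_cpu is a flat array of fixed-size entries: entry_size
 * is the size of the largest entry, shorter entries are zero padded, and the
 * array is kept in eytzinger order for fast lookup.
 */
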
static void verify_replicas_entry(struct bch_replicas_entry_v1 *e)
{
#ifdef CONFIG_BCACHEFS_DEBUG
        BUG_ON(!e->nr_devs);
        BUG_ON(e->nr_required > 1 &&
               e->nr_required >= e->nr_devs);

        for (unsigned i = 0; i + 1 < e->nr_devs; i++)
                BUG_ON(e->devs[i] >= e->devs[i + 1]);
#endif
}

void bch2_replicas_entry_sort(struct bch_replicas_entry_v1 *e)
{
        bubble_sort(e->devs, e->nr_devs, u8_cmp);
}

static void bch2_cpu_replicas_sort(struct bch_replicas_cpu *r)
{
        eytzinger0_sort_r(r->entries, r->nr, r->entry_size,
                          bch2_memcmp, NULL, (void *)(size_t)r->entry_size);
}

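/*
 * Because each entry's device list is sorted, whole entries can be compared
 * with memcmp() - which is what both the eytzinger sort above and the
 * eytzinger search in __replicas_entry_idx() below rely on.
 */
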
static void bch2_replicas_entry_v0_to_text(struct printbuf *out,
                                           struct bch_replicas_entry_v0 *e)
{
        bch2_prt_data_type(out, e->data_type);

        prt_printf(out, ": %u [", e->nr_devs);
        for (unsigned i = 0; i < e->nr_devs; i++)
                prt_printf(out, i ? " %u" : "%u", e->devs[i]);
        prt_printf(out, "]");
}

void bch2_replicas_entry_to_text(struct printbuf *out,
                                 struct bch_replicas_entry_v1 *e)
{
        bch2_prt_data_type(out, e->data_type);

        prt_printf(out, ": %u/%u [", e->nr_required, e->nr_devs);
        for (unsigned i = 0; i < e->nr_devs; i++)
                prt_printf(out, i ? " %u" : "%u", e->devs[i]);
        prt_printf(out, "]");
}

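/*
 * Example rendering of a v1 entry: "user: 1/2 [0 3]" - that is,
 * nr_required/nr_devs followed by the device indices. v0 entries lack
 * nr_required and would print as "user: 2 [0 3]".
 */
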
static int bch2_replicas_entry_sb_validate(struct bch_replicas_entry_v1 *r,
                                           struct bch_sb *sb,
                                           struct printbuf *err)
{
        if (!r->nr_devs) {
                prt_printf(err, "no devices in entry ");
                goto bad;
        }

        if (r->nr_required > 1 &&
            r->nr_required >= r->nr_devs) {
                prt_printf(err, "bad nr_required in entry ");
                goto bad;
        }

        for (unsigned i = 0; i < r->nr_devs; i++)
                if (r->devs[i] != BCH_SB_MEMBER_INVALID &&
                    !bch2_member_exists(sb, r->devs[i])) {
                        prt_printf(err, "invalid device %u in entry ", r->devs[i]);
                        goto bad;
                }

        return 0;
bad:
        bch2_replicas_entry_to_text(err, r);
        return -BCH_ERR_invalid_replicas_entry;
}

int bch2_replicas_entry_validate(struct bch_replicas_entry_v1 *r,
                                 struct bch_fs *c,
                                 struct printbuf *err)
{
        if (!r->nr_devs) {
                prt_printf(err, "no devices in entry ");
                goto bad;
        }

        if (r->nr_required > 1 &&
            r->nr_required >= r->nr_devs) {
                prt_printf(err, "bad nr_required in entry ");
                goto bad;
        }

        for (unsigned i = 0; i < r->nr_devs; i++)
                if (r->devs[i] != BCH_SB_MEMBER_INVALID &&
                    !bch2_dev_exists(c, r->devs[i])) {
                        prt_printf(err, "invalid device %u in entry ", r->devs[i]);
                        goto bad;
                }

        return 0;
bad:
        bch2_replicas_entry_to_text(err, r);
        return -BCH_ERR_invalid_replicas_entry;
}

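/*
 * The two validate functions above are identical except for where device
 * membership is checked: against the superblock's member table
 * (bch2_member_exists()) when validating a superblock as read from disk,
 * or against the running filesystem (bch2_dev_exists()) otherwise.
 */
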
void bch2_cpu_replicas_to_text(struct printbuf *out,
                               struct bch_replicas_cpu *r)
{
        struct bch_replicas_entry_v1 *e;
        bool first = true;

        for_each_cpu_replicas_entry(r, e) {
                if (!first)
                        prt_printf(out, " ");
                first = false;

                bch2_replicas_entry_to_text(out, e);
        }
}

static void extent_to_replicas(struct bkey_s_c k,
                               struct bch_replicas_entry_v1 *r)
{
        struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
        const union bch_extent_entry *entry;
        struct extent_ptr_decoded p;

        r->nr_required = 1;

        bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
                if (p.ptr.cached)
                        continue;

                if (!p.has_ec)
                        replicas_entry_add_dev(r, p.ptr.dev);
                else
                        r->nr_required = 0;
        }
}

static void stripe_to_replicas(struct bkey_s_c k,
                               struct bch_replicas_entry_v1 *r)
{
        struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);
        const struct bch_extent_ptr *ptr;

        r->nr_required = s.v->nr_blocks - s.v->nr_redundant;

        for (ptr = s.v->ptrs;
             ptr < s.v->ptrs + s.v->nr_blocks;
             ptr++)
                replicas_entry_add_dev(r, ptr->dev);
}

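/*
 * nr_required for the generated entries: a plain replicated extent needs any
 * one replica (nr_required = 1); an extent with erasure coded pointers gets
 * nr_required = 0, since its durability is tracked by the stripe's own
 * replicas entry; a stripe needs its data blocks, i.e.
 * nr_blocks - nr_redundant.
 */
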
void bch2_bkey_to_replicas(struct bch_replicas_entry_v1 *e,
                           struct bkey_s_c k)
{
        e->nr_devs = 0;

        switch (k.k->type) {
        case KEY_TYPE_btree_ptr:
        case KEY_TYPE_btree_ptr_v2:
                e->data_type = BCH_DATA_btree;
                extent_to_replicas(k, e);
                break;
        case KEY_TYPE_extent:
        case KEY_TYPE_reflink_v:
                e->data_type = BCH_DATA_user;
                extent_to_replicas(k, e);
                break;
        case KEY_TYPE_stripe:
                e->data_type = BCH_DATA_parity;
                stripe_to_replicas(k, e);
                break;
        }

        bch2_replicas_entry_sort(e);
}

void bch2_devlist_to_replicas(struct bch_replicas_entry_v1 *e,
                              enum bch_data_type data_type,
                              struct bch_devs_list devs)
{
        BUG_ON(!data_type ||
               data_type == BCH_DATA_sb ||
               data_type >= BCH_DATA_NR);

        e->data_type    = data_type;
        e->nr_devs      = 0;
        e->nr_required  = 1;

        darray_for_each(devs, i)
                replicas_entry_add_dev(e, *i);

        bch2_replicas_entry_sort(e);
}

static struct bch_replicas_cpu
cpu_replicas_add_entry(struct bch_fs *c,
                       struct bch_replicas_cpu *old,
                       struct bch_replicas_entry_v1 *new_entry)
{
        struct bch_replicas_cpu new = {
                .nr             = old->nr + 1,
                .entry_size     = max_t(unsigned, old->entry_size,
                                        replicas_entry_bytes(new_entry)),
        };

        new.entries = kcalloc(new.nr, new.entry_size, GFP_KERNEL);
        if (!new.entries)
                return new;

        for (unsigned i = 0; i < old->nr; i++)
                memcpy(cpu_replicas_entry(&new, i),
                       cpu_replicas_entry(old, i),
                       old->entry_size);

        memcpy(cpu_replicas_entry(&new, old->nr),
               new_entry,
               replicas_entry_bytes(new_entry));

        bch2_cpu_replicas_sort(&new);
        return new;
}

static inline int __replicas_entry_idx(struct bch_replicas_cpu *r,
                                       struct bch_replicas_entry_v1 *search)
{
        int idx, entry_size = replicas_entry_bytes(search);

        if (unlikely(entry_size > r->entry_size))
                return -1;

#define entry_cmp(_l, _r)       memcmp(_l, _r, entry_size)
        idx = eytzinger0_find(r->entries, r->nr, r->entry_size,
                              entry_cmp, search);
#undef entry_cmp

        return idx < r->nr ? idx : -1;
}

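/*
 * The search key may be smaller than r->entry_size; that's fine, because
 * nr_devs is part of the compared prefix and the table entries are zero
 * padded, so comparing only the key's bytes still gives an exact match.
 * eytzinger0_find() returns r->nr when no match is found, hence the
 * idx < r->nr check above.
 */
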
int bch2_replicas_entry_idx(struct bch_fs *c,
                            struct bch_replicas_entry_v1 *search)
{
        bch2_replicas_entry_sort(search);

        return __replicas_entry_idx(&c->replicas, search);
}

static bool __replicas_has_entry(struct bch_replicas_cpu *r,
                                 struct bch_replicas_entry_v1 *search)
{
        return __replicas_entry_idx(r, search) >= 0;
}

bool bch2_replicas_marked_locked(struct bch_fs *c,
                                 struct bch_replicas_entry_v1 *search)
{
        verify_replicas_entry(search);

        return !search->nr_devs ||
                (__replicas_has_entry(&c->replicas, search) &&
                 (likely(!c->replicas_gc.entries) ||
                  __replicas_has_entry(&c->replicas_gc, search)));
}

bool bch2_replicas_marked(struct bch_fs *c,
                          struct bch_replicas_entry_v1 *search)
{
        percpu_down_read(&c->mark_lock);
        bool ret = bch2_replicas_marked_locked(c, search);
        percpu_up_read(&c->mark_lock);

        return ret;
}

noinline
static int bch2_mark_replicas_slowpath(struct bch_fs *c,
                                       struct bch_replicas_entry_v1 *new_entry)
{
        struct bch_replicas_cpu new_r, new_gc;
        int ret = 0;

        verify_replicas_entry(new_entry);

        memset(&new_r, 0, sizeof(new_r));
        memset(&new_gc, 0, sizeof(new_gc));

        mutex_lock(&c->sb_lock);

        if (c->replicas_gc.entries &&
            !__replicas_has_entry(&c->replicas_gc, new_entry)) {
                new_gc = cpu_replicas_add_entry(c, &c->replicas_gc, new_entry);
                if (!new_gc.entries) {
                        ret = -BCH_ERR_ENOMEM_cpu_replicas;
                        goto err;
                }
        }

        if (!__replicas_has_entry(&c->replicas, new_entry)) {
                new_r = cpu_replicas_add_entry(c, &c->replicas, new_entry);
                if (!new_r.entries) {
                        ret = -BCH_ERR_ENOMEM_cpu_replicas;
                        goto err;
                }

                ret = bch2_cpu_replicas_to_sb_replicas(c, &new_r);
                if (ret)
                        goto err;
        }

        if (!new_r.entries &&
            !new_gc.entries)
                goto out;

        /* allocations done, now commit: */

        if (new_r.entries)
                bch2_write_super(c);

        /* don't update in memory replicas until changes are persistent */
        percpu_down_write(&c->mark_lock);
        if (new_r.entries)
                swap(c->replicas, new_r);
        if (new_gc.entries)
                swap(new_gc, c->replicas_gc);
        percpu_up_write(&c->mark_lock);
out:
        mutex_unlock(&c->sb_lock);

        kfree(new_r.entries);
        kfree(new_gc.entries);

        return ret;
err:
        bch_err_msg(c, ret, "adding replicas entry");
        goto out;
}

int bch2_mark_replicas(struct bch_fs *c, struct bch_replicas_entry_v1 *r)
{
        return likely(bch2_replicas_marked(c, r))
                ? 0 : bch2_mark_replicas_slowpath(c, r);
}

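/*
 * Fast path: just a lookup under mark_lock (read). The slowpath takes
 * sb_lock, rechecks (the entry may have been added since the fast path
 * check), and only swaps in the new in-memory tables after the updated
 * superblock has been written.
 */
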
/*
 * Old replicas_gc mechanism: only used for journal replicas entries now, should
 * die at some point:
 */

int bch2_replicas_gc_end(struct bch_fs *c, int ret)
{
        lockdep_assert_held(&c->replicas_gc_lock);

        mutex_lock(&c->sb_lock);
        percpu_down_write(&c->mark_lock);

        ret = ret ?:
                bch2_cpu_replicas_to_sb_replicas(c, &c->replicas_gc);
        if (!ret)
                swap(c->replicas, c->replicas_gc);

        kfree(c->replicas_gc.entries);
        c->replicas_gc.entries = NULL;

        percpu_up_write(&c->mark_lock);

        if (!ret)
                bch2_write_super(c);

        mutex_unlock(&c->sb_lock);

        return ret;
}

int bch2_replicas_gc_start(struct bch_fs *c, unsigned typemask)
{
        struct bch_replicas_entry_v1 *e;
        unsigned i = 0;

        lockdep_assert_held(&c->replicas_gc_lock);

        mutex_lock(&c->sb_lock);
        BUG_ON(c->replicas_gc.entries);

        c->replicas_gc.nr               = 0;
        c->replicas_gc.entry_size       = 0;

        for_each_cpu_replicas_entry(&c->replicas, e) {
                /* Preserve unknown data types */
                if (e->data_type >= BCH_DATA_NR ||
                    !((1 << e->data_type) & typemask)) {
                        c->replicas_gc.nr++;
                        c->replicas_gc.entry_size =
                                max_t(unsigned, c->replicas_gc.entry_size,
                                      replicas_entry_bytes(e));
                }
        }

        c->replicas_gc.entries = kcalloc(c->replicas_gc.nr,
                                         c->replicas_gc.entry_size,
                                         GFP_KERNEL);
        if (!c->replicas_gc.entries) {
                mutex_unlock(&c->sb_lock);
                bch_err(c, "error allocating c->replicas_gc");
                return -BCH_ERR_ENOMEM_replicas_gc;
        }

        for_each_cpu_replicas_entry(&c->replicas, e)
                if (e->data_type >= BCH_DATA_NR ||
                    !((1 << e->data_type) & typemask))
                        memcpy(cpu_replicas_entry(&c->replicas_gc, i++),
                               e, c->replicas_gc.entry_size);

        bch2_cpu_replicas_sort(&c->replicas_gc);
        mutex_unlock(&c->sb_lock);

        return 0;
}

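/*
 * Sketch of the intended calling sequence (the caller holds
 * replicas_gc_lock for the duration; exact callers vary):
 *
 *      mutex_lock(&c->replicas_gc_lock);
 *      ret = bch2_replicas_gc_start(c, 1 << BCH_DATA_journal);
 *
 *      // re-mark every entry still in use; bch2_mark_replicas() adds
 *      // them back to c->replicas_gc while it is non-NULL
 *
 *      ret = bch2_replicas_gc_end(c, ret);
 *      mutex_unlock(&c->replicas_gc_lock);
 */
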
/*
 * New much simpler mechanism for clearing out unneeded replicas entries - drop
 * replicas entries that have 0 sectors used.
 *
 * However, we don't track sector counts for journal usage, so this doesn't drop
 * any BCH_DATA_journal entries; the old bch2_replicas_gc_(start|end) mechanism
 * is retained for that.
 */

int bch2_replicas_gc2(struct bch_fs *c)
{
        struct bch_replicas_cpu new = { 0 };
        unsigned nr;
        int ret = 0;

        bch2_accounting_mem_gc(c);
retry:
        nr              = READ_ONCE(c->replicas.nr);
        new.entry_size  = READ_ONCE(c->replicas.entry_size);
        new.entries     = kcalloc(nr, new.entry_size, GFP_KERNEL);
        if (!new.entries) {
                bch_err(c, "error allocating c->replicas_gc");
                return -BCH_ERR_ENOMEM_replicas_gc;
        }

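        /*
         * The allocation above was sized from an unlocked snapshot of the
         * table; recheck under the locks and retry if the table was resized
         * in the meantime:
         */
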
        mutex_lock(&c->sb_lock);
        percpu_down_write(&c->mark_lock);

        if (nr                  != c->replicas.nr ||
            new.entry_size      != c->replicas.entry_size) {
                percpu_up_write(&c->mark_lock);
                mutex_unlock(&c->sb_lock);
                kfree(new.entries);
                goto retry;
        }

        for (unsigned i = 0; i < c->replicas.nr; i++) {
                struct bch_replicas_entry_v1 *e =
                        cpu_replicas_entry(&c->replicas, i);

                struct disk_accounting_pos k = {
                        .type = BCH_DISK_ACCOUNTING_replicas,
                };

                unsafe_memcpy(&k.replicas, e, replicas_entry_bytes(e),
                              "embedded variable length struct");

                struct bpos p = disk_accounting_pos_to_bpos(&k);

                struct bch_accounting_mem *acc = &c->accounting;
                bool kill = eytzinger0_find(acc->k.data, acc->k.nr, sizeof(acc->k.data[0]),
                                            accounting_pos_cmp, &p) >= acc->k.nr;

                if (e->data_type == BCH_DATA_journal || !kill)
                        memcpy(cpu_replicas_entry(&new, new.nr++),
                               e, new.entry_size);
        }

        bch2_cpu_replicas_sort(&new);

        ret = bch2_cpu_replicas_to_sb_replicas(c, &new);

        if (!ret)
                swap(c->replicas, new);

        kfree(new.entries);

        percpu_up_write(&c->mark_lock);

        if (!ret)
                bch2_write_super(c);

        mutex_unlock(&c->sb_lock);

        return ret;
}

/* Replicas tracking - superblock: */

static int
__bch2_sb_replicas_to_cpu_replicas(struct bch_sb_field_replicas *sb_r,
                                   struct bch_replicas_cpu *cpu_r)
{
        struct bch_replicas_entry_v1 *e, *dst;
        unsigned nr = 0, entry_size = 0, idx = 0;

        for_each_replicas_entry(sb_r, e) {
                entry_size = max_t(unsigned, entry_size,
                                   replicas_entry_bytes(e));
                nr++;
        }

        cpu_r->entries = kcalloc(nr, entry_size, GFP_KERNEL);
        if (!cpu_r->entries)
                return -BCH_ERR_ENOMEM_cpu_replicas;

        cpu_r->nr               = nr;
        cpu_r->entry_size       = entry_size;

        for_each_replicas_entry(sb_r, e) {
                dst = cpu_replicas_entry(cpu_r, idx++);
                memcpy(dst, e, replicas_entry_bytes(e));
                bch2_replicas_entry_sort(dst);
        }

        return 0;
}

static int
__bch2_sb_replicas_v0_to_cpu_replicas(struct bch_sb_field_replicas_v0 *sb_r,
                                      struct bch_replicas_cpu *cpu_r)
{
        struct bch_replicas_entry_v0 *e;
        unsigned nr = 0, entry_size = 0, idx = 0;

        for_each_replicas_entry(sb_r, e) {
                entry_size = max_t(unsigned, entry_size,
                                   replicas_entry_bytes(e));
                nr++;
        }

        entry_size += sizeof(struct bch_replicas_entry_v1) -
                sizeof(struct bch_replicas_entry_v0);

        cpu_r->entries = kcalloc(nr, entry_size, GFP_KERNEL);
        if (!cpu_r->entries)
                return -BCH_ERR_ENOMEM_cpu_replicas;

        cpu_r->nr               = nr;
        cpu_r->entry_size       = entry_size;

        for_each_replicas_entry(sb_r, e) {
                struct bch_replicas_entry_v1 *dst =
                        cpu_replicas_entry(cpu_r, idx++);

                dst->data_type  = e->data_type;
                dst->nr_devs    = e->nr_devs;
                dst->nr_required = 1;
                memcpy(dst->devs, e->devs, e->nr_devs);
                bch2_replicas_entry_sort(dst);
        }

        return 0;
}

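/*
 * When converting v0 entries, entry_size is grown by the difference between
 * the v1 and v0 headers so each slot has room for the nr_required field,
 * which v0 didn't store and which defaults to 1.
 */
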
int bch2_sb_replicas_to_cpu_replicas(struct bch_fs *c)
{
        struct bch_sb_field_replicas *sb_v1;
        struct bch_sb_field_replicas_v0 *sb_v0;
        struct bch_replicas_cpu new_r = { 0, 0, NULL };
        int ret = 0;

        if ((sb_v1 = bch2_sb_field_get(c->disk_sb.sb, replicas)))
                ret = __bch2_sb_replicas_to_cpu_replicas(sb_v1, &new_r);
        else if ((sb_v0 = bch2_sb_field_get(c->disk_sb.sb, replicas_v0)))
                ret = __bch2_sb_replicas_v0_to_cpu_replicas(sb_v0, &new_r);
        if (ret)
                return ret;

        bch2_cpu_replicas_sort(&new_r);

        percpu_down_write(&c->mark_lock);
        swap(c->replicas, new_r);
        percpu_up_write(&c->mark_lock);

        kfree(new_r.entries);

        return 0;
}

static int bch2_cpu_replicas_to_sb_replicas_v0(struct bch_fs *c,
                                               struct bch_replicas_cpu *r)
{
        struct bch_sb_field_replicas_v0 *sb_r;
        struct bch_replicas_entry_v0 *dst;
        struct bch_replicas_entry_v1 *src;
        size_t bytes;

        bytes = sizeof(struct bch_sb_field_replicas);

        for_each_cpu_replicas_entry(r, src)
                bytes += replicas_entry_bytes(src) - 1;

        sb_r = bch2_sb_field_resize(&c->disk_sb, replicas_v0,
                                    DIV_ROUND_UP(bytes, sizeof(u64)));
        if (!sb_r)
                return -BCH_ERR_ENOSPC_sb_replicas;

        bch2_sb_field_delete(&c->disk_sb, BCH_SB_FIELD_replicas);
        sb_r = bch2_sb_field_get(c->disk_sb.sb, replicas_v0);

        memset(&sb_r->entries, 0,
               vstruct_end(&sb_r->field) -
               (void *) &sb_r->entries);

        dst = sb_r->entries;
        for_each_cpu_replicas_entry(r, src) {
                dst->data_type  = src->data_type;
                dst->nr_devs    = src->nr_devs;
                memcpy(dst->devs, src->devs, src->nr_devs);

                dst = replicas_entry_next(dst);

                BUG_ON((void *) dst > vstruct_end(&sb_r->field));
        }

        return 0;
}

static int bch2_cpu_replicas_to_sb_replicas(struct bch_fs *c,
                                            struct bch_replicas_cpu *r)
{
        struct bch_sb_field_replicas *sb_r;
        struct bch_replicas_entry_v1 *dst, *src;
        bool need_v1 = false;
        size_t bytes;

        bytes = sizeof(struct bch_sb_field_replicas);

        for_each_cpu_replicas_entry(r, src) {
                bytes += replicas_entry_bytes(src);
                if (src->nr_required != 1)
                        need_v1 = true;
        }

        if (!need_v1)
                return bch2_cpu_replicas_to_sb_replicas_v0(c, r);

        sb_r = bch2_sb_field_resize(&c->disk_sb, replicas,
                                    DIV_ROUND_UP(bytes, sizeof(u64)));
        if (!sb_r)
                return -BCH_ERR_ENOSPC_sb_replicas;

        bch2_sb_field_delete(&c->disk_sb, BCH_SB_FIELD_replicas_v0);
        sb_r = bch2_sb_field_get(c->disk_sb.sb, replicas);

        memset(&sb_r->entries, 0,
               vstruct_end(&sb_r->field) -
               (void *) &sb_r->entries);

        dst = sb_r->entries;
        for_each_cpu_replicas_entry(r, src) {
                memcpy(dst, src, replicas_entry_bytes(src));

                dst = replicas_entry_next(dst);

                BUG_ON((void *) dst > vstruct_end(&sb_r->field));
        }

        return 0;
}

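/*
 * The superblock carries exactly one of the two replicas fields: v1 entries
 * store nr_required, v0 entries don't (one byte smaller per entry, hence the
 * "- 1" when sizing the v0 field). If every entry has nr_required == 1 we
 * write the more compact v0 field; whichever format is written, the other
 * field is deleted.
 */
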
static int bch2_cpu_replicas_validate(struct bch_replicas_cpu *cpu_r,
                                      struct bch_sb *sb,
                                      struct printbuf *err)
{
        unsigned i;

        sort_r(cpu_r->entries,
               cpu_r->nr,
               cpu_r->entry_size,
               bch2_memcmp, NULL,
               (void *)(size_t)cpu_r->entry_size);

        for (i = 0; i < cpu_r->nr; i++) {
                struct bch_replicas_entry_v1 *e =
                        cpu_replicas_entry(cpu_r, i);

                int ret = bch2_replicas_entry_sb_validate(e, sb, err);
                if (ret)
                        return ret;

                if (i + 1 < cpu_r->nr) {
                        struct bch_replicas_entry_v1 *n =
                                cpu_replicas_entry(cpu_r, i + 1);

                        BUG_ON(memcmp(e, n, cpu_r->entry_size) > 0);

                        if (!memcmp(e, n, cpu_r->entry_size)) {
                                prt_printf(err, "duplicate replicas entry ");
                                bch2_replicas_entry_to_text(err, e);
                                return -BCH_ERR_invalid_sb_replicas;
                        }
                }
        }

        return 0;
}

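/*
 * Note that sort_r() above puts the entries in plain linear memcmp order,
 * not the eytzinger order used at runtime - that makes duplicates adjacent,
 * so the single pass above can detect them.
 */
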
static int bch2_sb_replicas_validate(struct bch_sb *sb, struct bch_sb_field *f,
                                     enum bch_validate_flags flags, struct printbuf *err)
{
        struct bch_sb_field_replicas *sb_r = field_to_type(f, replicas);
        struct bch_replicas_cpu cpu_r;
        int ret;

        ret = __bch2_sb_replicas_to_cpu_replicas(sb_r, &cpu_r);
        if (ret)
                return ret;

        ret = bch2_cpu_replicas_validate(&cpu_r, sb, err);
        kfree(cpu_r.entries);
        return ret;
}

static void bch2_sb_replicas_to_text(struct printbuf *out,
                                     struct bch_sb *sb,
                                     struct bch_sb_field *f)
{
        struct bch_sb_field_replicas *r = field_to_type(f, replicas);
        struct bch_replicas_entry_v1 *e;
        bool first = true;

        for_each_replicas_entry(r, e) {
                if (!first)
                        prt_printf(out, " ");
                first = false;

                bch2_replicas_entry_to_text(out, e);
        }
        prt_newline(out);
}

const struct bch_sb_field_ops bch_sb_field_ops_replicas = {
        .validate       = bch2_sb_replicas_validate,
        .to_text        = bch2_sb_replicas_to_text,
};

static int bch2_sb_replicas_v0_validate(struct bch_sb *sb, struct bch_sb_field *f,
                                        enum bch_validate_flags flags, struct printbuf *err)
{
        struct bch_sb_field_replicas_v0 *sb_r = field_to_type(f, replicas_v0);
        struct bch_replicas_cpu cpu_r;
        int ret;

        ret = __bch2_sb_replicas_v0_to_cpu_replicas(sb_r, &cpu_r);
        if (ret)
                return ret;

        ret = bch2_cpu_replicas_validate(&cpu_r, sb, err);
        kfree(cpu_r.entries);
        return ret;
}

static void bch2_sb_replicas_v0_to_text(struct printbuf *out,
                                        struct bch_sb *sb,
                                        struct bch_sb_field *f)
{
        struct bch_sb_field_replicas_v0 *sb_r = field_to_type(f, replicas_v0);
        struct bch_replicas_entry_v0 *e;
        bool first = true;

        for_each_replicas_entry(sb_r, e) {
                if (!first)
                        prt_printf(out, " ");
                first = false;

                bch2_replicas_entry_v0_to_text(out, e);
        }
        prt_newline(out);
}

const struct bch_sb_field_ops bch_sb_field_ops_replicas_v0 = {
        .validate       = bch2_sb_replicas_v0_validate,
        .to_text        = bch2_sb_replicas_v0_to_text,
};

/* Query replicas: */

bool bch2_have_enough_devs(struct bch_fs *c, struct bch_devs_mask devs,
                           unsigned flags, bool print)
{
        struct bch_replicas_entry_v1 *e;
        bool ret = true;

        percpu_down_read(&c->mark_lock);
        for_each_cpu_replicas_entry(&c->replicas, e) {
                unsigned nr_online = 0, nr_failed = 0, dflags = 0;
                bool metadata = e->data_type < BCH_DATA_user;

                if (e->data_type == BCH_DATA_cached)
                        continue;

                rcu_read_lock();
                for (unsigned i = 0; i < e->nr_devs; i++) {
                        if (e->devs[i] == BCH_SB_MEMBER_INVALID) {
                                nr_failed++;
                                continue;
                        }

                        nr_online += test_bit(e->devs[i], devs.d);

                        struct bch_dev *ca = bch2_dev_rcu_noerror(c, e->devs[i]);
                        nr_failed += !ca || ca->mi.state == BCH_MEMBER_STATE_failed;
                }
                rcu_read_unlock();

                if (nr_online + nr_failed == e->nr_devs)
                        continue;

                if (nr_online < e->nr_required)
                        dflags |= metadata
                                ? BCH_FORCE_IF_METADATA_LOST
                                : BCH_FORCE_IF_DATA_LOST;

                if (nr_online < e->nr_devs)
                        dflags |= metadata
                                ? BCH_FORCE_IF_METADATA_DEGRADED
                                : BCH_FORCE_IF_DATA_DEGRADED;

                if (dflags & ~flags) {
                        if (print) {
                                struct printbuf buf = PRINTBUF;

                                bch2_replicas_entry_to_text(&buf, e);
                                bch_err(c, "insufficient devices online (%u) for replicas entry %s",
                                        nr_online, buf.buf);
                                printbuf_exit(&buf);
                        }
                        ret = false;
                        break;
                }
        }
        percpu_up_read(&c->mark_lock);

        return ret;
}

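/*
 * For each replicas entry: an entry whose missing devices are all marked
 * failed is skipped (nothing new is being lost). Otherwise, missing devices
 * make the entry degraded, and fewer than nr_required online devices make it
 * lost. The caller's flags (BCH_FORCE_IF_*) say which of those conditions it
 * is willing to tolerate, e.g. for a degraded mount or device removal.
 */
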
unsigned bch2_sb_dev_has_data(struct bch_sb *sb, unsigned dev)
{
        struct bch_sb_field_replicas *replicas;
        struct bch_sb_field_replicas_v0 *replicas_v0;
        unsigned data_has = 0;

        replicas = bch2_sb_field_get(sb, replicas);
        replicas_v0 = bch2_sb_field_get(sb, replicas_v0);

        if (replicas) {
                struct bch_replicas_entry_v1 *r;

                for_each_replicas_entry(replicas, r) {
                        if (r->data_type >= sizeof(data_has) * 8)
                                continue;

                        for (unsigned i = 0; i < r->nr_devs; i++)
                                if (r->devs[i] == dev)
                                        data_has |= 1 << r->data_type;
                }
        } else if (replicas_v0) {
                struct bch_replicas_entry_v0 *r;

                for_each_replicas_entry_v0(replicas_v0, r) {
                        if (r->data_type >= sizeof(data_has) * 8)
                                continue;

                        for (unsigned i = 0; i < r->nr_devs; i++)
                                if (r->devs[i] == dev)
                                        data_has |= 1 << r->data_type;
                }
        }

        return data_has;
}

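/*
 * Returns a bitmask of BCH_DATA_* types for which the superblock has a
 * replicas entry referencing this device - i.e. what kinds of data the
 * device currently holds.
 */
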
unsigned bch2_dev_has_data(struct bch_fs *c, struct bch_dev *ca)
{
        mutex_lock(&c->sb_lock);
        unsigned ret = bch2_sb_dev_has_data(c->disk_sb.sb, ca->dev_idx);
        mutex_unlock(&c->sb_lock);

        return ret;
}

void bch2_fs_replicas_exit(struct bch_fs *c)
{
        kfree(c->replicas.entries);
        kfree(c->replicas_gc.entries);
}