// SPDX-License-Identifier: GPL-2.0

#include "alloc_background.h"
#include "alloc_foreground.h"
#include "backpointers.h"
#include "btree_update.h"
#include "btree_write_buffer.h"
#include "disk_accounting.h"
#include "disk_groups.h"

#include <linux/sort.h>

#include <linux/raid/pq.h>
#include <linux/raid/xor.h>
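/*
 * Software RAID helpers for stripe parity: raid5_recov() rebuilds a single
 * block by XORing the remaining blocks together; the raid6 calls below handle
 * the second parity block.
 */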
static void raid5_recov(unsigned disks, unsigned failed_idx,
			size_t size, void **data)
{
	unsigned i = 2, nr;

	BUG_ON(failed_idx >= disks);

	swap(data[0], data[failed_idx]);
	memcpy(data[0], data[1], size);

	while (i < disks) {
		nr = min_t(unsigned, disks - i, MAX_XOR_BLOCKS);
		xor_blocks(nr, size, data[0], data + i);
		i += nr;
	}

	swap(data[0], data[failed_idx]);
}
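/* Generate np parity blocks for nd data blocks: P by XOR, Q by RAID6 syndrome: */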
static void raid_gen(int nd, int np, size_t size, void **v)
{
	if (np >= 1)
		raid5_recov(nd + np, nd, size, v);
	if (np >= 2)
		raid6_call.gen_syndrome(nd + np, size, v);
	BUG_ON(np >= 3);
}
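/* Reconstruct nr failed blocks, whose indices are listed in ir[]: */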
static void raid_rec(int nr, int *ir, int nd, int np, size_t size, void **v)
{
	switch (nr) {
	case 0:
		break;
	case 1:
		if (ir[0] < nd + 1)
			raid5_recov(nd + 1, ir[0], size, v);
		else
			raid6_call.gen_syndrome(nd + np, size, v);
		break;
	case 2:
		if (ir[1] < nd) {
			/* data+data failure. */
			raid6_2data_recov(nd + np, size, ir[0], ir[1], v);
		} else if (ir[0] < nd) {
			/* data + p/q failure */

			if (ir[1] == nd) /* data + p failure */
				raid6_datap_recov(nd + np, size, ir[0], v);
			else { /* data + q failure */
				raid5_recov(nd + 1, ir[0], size, v);
				raid6_call.gen_syndrome(nd + np, size, v);
			}
		} else {
			raid_gen(nd, np, size, v);
		}
		break;
	default:
		BUG();
	}
}
#include <raid/raid.h>

struct ec_bio {
	struct bch_dev		*ca;
	struct ec_stripe_buf	*buf;
	size_t			idx;
	struct bio		bio;
};
109 /* Stripes btree keys: */
int bch2_stripe_validate(struct bch_fs *c, struct bkey_s_c k,
			 enum bch_validate_flags flags)
{
	const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
	int ret = 0;

	bkey_fsck_err_on(bkey_eq(k.k->p, POS_MIN) ||
			 bpos_gt(k.k->p, POS(0, U32_MAX)),
			 c, stripe_pos_bad,
			 "stripe at bad pos");

	bkey_fsck_err_on(bkey_val_u64s(k.k) < stripe_val_u64s(s),
			 c, stripe_val_size_bad,
			 "incorrect value size (%zu < %u)",
			 bkey_val_u64s(k.k), stripe_val_u64s(s));

	bkey_fsck_err_on(s->csum_granularity_bits >= 64,
			 c, stripe_csum_granularity_bad,
			 "invalid csum granularity (%u >= 64)",
			 s->csum_granularity_bits);

	ret = bch2_bkey_ptrs_validate(c, k, flags);
fsck_err:
	return ret;
}
void bch2_stripe_to_text(struct printbuf *out, struct bch_fs *c,
			 struct bkey_s_c k)
{
	const struct bch_stripe *sp = bkey_s_c_to_stripe(k).v;
	struct bch_stripe s = {};

	memcpy(&s, sp, min(sizeof(s), bkey_val_bytes(k.k)));

	unsigned nr_data = s.nr_blocks - s.nr_redundant;

	prt_printf(out, "algo %u sectors %u blocks %u:%u csum ",
		   s.algorithm,
		   le16_to_cpu(s.sectors),
		   nr_data,
		   s.nr_redundant);
	bch2_prt_csum_type(out, s.csum_type);
	prt_str(out, " gran ");
	if (s.csum_granularity_bits < 64)
		prt_printf(out, "%llu", 1ULL << s.csum_granularity_bits);
	else
		prt_printf(out, "(invalid shift %u)", s.csum_granularity_bits);

	if (s.disk_label) {
		prt_str(out, " label");
		bch2_disk_path_to_text(out, c, s.disk_label - 1);
	}

	for (unsigned i = 0; i < s.nr_blocks; i++) {
		const struct bch_extent_ptr *ptr = sp->ptrs + i;

		if ((void *) ptr >= bkey_val_end(k))
			break;

		prt_char(out, ' ');
		bch2_extent_ptr_to_text(out, c, ptr);

		if (s.csum_type < BCH_CSUM_NR &&
		    i < nr_data &&
		    stripe_blockcount_offset(&s, i) < bkey_val_bytes(k.k))
			prt_printf(out, "#%u", stripe_blockcount_get(sp, i));
	}
}
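/*
 * Stripe triggers: keep alloc btree info for the buckets a stripe points to in
 * sync as stripe keys are created, updated and deleted.
 */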
182 static int __mark_stripe_bucket(struct btree_trans
*trans
,
184 struct bkey_s_c_stripe s
,
185 unsigned ptr_idx
, bool deleting
,
187 struct bch_alloc_v4
*a
,
188 enum btree_iter_update_trigger_flags flags
)
190 const struct bch_extent_ptr
*ptr
= s
.v
->ptrs
+ ptr_idx
;
191 unsigned nr_data
= s
.v
->nr_blocks
- s
.v
->nr_redundant
;
192 bool parity
= ptr_idx
>= nr_data
;
193 enum bch_data_type data_type
= parity
? BCH_DATA_parity
: BCH_DATA_stripe
;
194 s64 sectors
= parity
? le16_to_cpu(s
.v
->sectors
) : 0;
195 struct printbuf buf
= PRINTBUF
;
198 struct bch_fs
*c
= trans
->c
;
203 if (bch2_trans_inconsistent_on(a
->stripe
||
204 a
->stripe_redundancy
, trans
,
205 "bucket %llu:%llu gen %u data type %s dirty_sectors %u: multiple stripes using same bucket (%u, %llu)\n%s",
206 bucket
.inode
, bucket
.offset
, a
->gen
,
207 bch2_data_type_str(a
->data_type
),
209 a
->stripe
, s
.k
->p
.offset
,
210 (bch2_bkey_val_to_text(&buf
, c
, s
.s_c
), buf
.buf
))) {
211 ret
= -BCH_ERR_mark_stripe
;
215 if (bch2_trans_inconsistent_on(parity
&& bch2_bucket_sectors_total(*a
), trans
,
216 "bucket %llu:%llu gen %u data type %s dirty_sectors %u cached_sectors %u: data already in parity bucket\n%s",
217 bucket
.inode
, bucket
.offset
, a
->gen
,
218 bch2_data_type_str(a
->data_type
),
221 (bch2_bkey_val_to_text(&buf
, c
, s
.s_c
), buf
.buf
))) {
222 ret
= -BCH_ERR_mark_stripe
;
226 if (bch2_trans_inconsistent_on(a
->stripe
!= s
.k
->p
.offset
||
227 a
->stripe_redundancy
!= s
.v
->nr_redundant
, trans
,
228 "bucket %llu:%llu gen %u: not marked as stripe when deleting stripe (got %u)\n%s",
229 bucket
.inode
, bucket
.offset
, a
->gen
,
231 (bch2_bkey_val_to_text(&buf
, c
, s
.s_c
), buf
.buf
))) {
232 ret
= -BCH_ERR_mark_stripe
;
236 if (bch2_trans_inconsistent_on(a
->data_type
!= data_type
, trans
,
237 "bucket %llu:%llu gen %u data type %s: wrong data type when stripe, should be %s\n%s",
238 bucket
.inode
, bucket
.offset
, a
->gen
,
239 bch2_data_type_str(a
->data_type
),
240 bch2_data_type_str(data_type
),
241 (bch2_bkey_val_to_text(&buf
, c
, s
.s_c
), buf
.buf
))) {
242 ret
= -BCH_ERR_mark_stripe
;
246 if (bch2_trans_inconsistent_on(parity
&&
247 (a
->dirty_sectors
!= -sectors
||
248 a
->cached_sectors
), trans
,
249 "bucket %llu:%llu gen %u dirty_sectors %u cached_sectors %u: wrong sectors when deleting parity block of stripe\n%s",
250 bucket
.inode
, bucket
.offset
, a
->gen
,
253 (bch2_bkey_val_to_text(&buf
, c
, s
.s_c
), buf
.buf
))) {
254 ret
= -BCH_ERR_mark_stripe
;
260 ret
= bch2_bucket_ref_update(trans
, ca
, s
.s_c
, ptr
, sectors
, data_type
,
261 a
->gen
, a
->data_type
, &a
->dirty_sectors
);
267 a
->stripe
= s
.k
->p
.offset
;
268 a
->stripe_redundancy
= s
.v
->nr_redundant
;
269 alloc_data_type_set(a
, data_type
);
272 a
->stripe_redundancy
= 0;
273 alloc_data_type_set(a
, BCH_DATA_user
);
280 static int mark_stripe_bucket(struct btree_trans
*trans
,
281 struct bkey_s_c_stripe s
,
282 unsigned ptr_idx
, bool deleting
,
283 enum btree_iter_update_trigger_flags flags
)
285 struct bch_fs
*c
= trans
->c
;
286 const struct bch_extent_ptr
*ptr
= s
.v
->ptrs
+ ptr_idx
;
287 struct printbuf buf
= PRINTBUF
;
290 struct bch_dev
*ca
= bch2_dev_tryget(c
, ptr
->dev
);
292 if (ptr
->dev
!= BCH_SB_MEMBER_INVALID
&& !(flags
& BTREE_TRIGGER_overwrite
))
293 ret
= -BCH_ERR_mark_stripe
;
297 struct bpos bucket
= PTR_BUCKET_POS(ca
, ptr
);
299 if (flags
& BTREE_TRIGGER_transactional
) {
300 struct bkey_i_alloc_v4
*a
=
301 bch2_trans_start_alloc_update(trans
, bucket
, 0);
302 ret
= PTR_ERR_OR_ZERO(a
) ?:
303 __mark_stripe_bucket(trans
, ca
, s
, ptr_idx
, deleting
, bucket
, &a
->v
, flags
);
306 if (flags
& BTREE_TRIGGER_gc
) {
307 percpu_down_read(&c
->mark_lock
);
308 struct bucket
*g
= gc_bucket(ca
, bucket
.offset
);
309 if (bch2_fs_inconsistent_on(!g
, c
, "reference to invalid bucket on device %u\n %s",
311 (bch2_bkey_val_to_text(&buf
, c
, s
.s_c
), buf
.buf
))) {
312 ret
= -BCH_ERR_mark_stripe
;
317 struct bch_alloc_v4 old
= bucket_m_to_alloc(*g
), new = old
;
318 ret
= __mark_stripe_bucket(trans
, ca
, s
, ptr_idx
, deleting
, bucket
, &new, flags
);
319 alloc_to_bucket(g
, new);
322 percpu_up_read(&c
->mark_lock
);
324 ret
= bch2_alloc_key_to_dev_counters(trans
, ca
, &old
, &new, flags
);
332 static int mark_stripe_buckets(struct btree_trans
*trans
,
333 struct bkey_s_c old
, struct bkey_s_c
new,
334 enum btree_iter_update_trigger_flags flags
)
336 const struct bch_stripe
*old_s
= old
.k
->type
== KEY_TYPE_stripe
337 ? bkey_s_c_to_stripe(old
).v
: NULL
;
338 const struct bch_stripe
*new_s
= new.k
->type
== KEY_TYPE_stripe
339 ? bkey_s_c_to_stripe(new).v
: NULL
;
341 BUG_ON(old_s
&& new_s
&& old_s
->nr_blocks
!= new_s
->nr_blocks
);
343 unsigned nr_blocks
= new_s
? new_s
->nr_blocks
: old_s
->nr_blocks
;
345 for (unsigned i
= 0; i
< nr_blocks
; i
++) {
346 if (new_s
&& old_s
&&
347 !memcmp(&new_s
->ptrs
[i
],
349 sizeof(new_s
->ptrs
[i
])))
353 int ret
= mark_stripe_bucket(trans
,
354 bkey_s_c_to_stripe(new), i
, false, flags
);
360 int ret
= mark_stripe_bucket(trans
,
361 bkey_s_c_to_stripe(old
), i
, true, flags
);
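/* Copy the fields we track in memory for each stripe out of the on-disk key: */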
static inline void stripe_to_mem(struct stripe *m, const struct bch_stripe *s)
{
	m->sectors	= le16_to_cpu(s->sectors);
	m->algorithm	= s->algorithm;
	m->nr_blocks	= s->nr_blocks;
	m->nr_redundant	= s->nr_redundant;
	m->disk_label	= s->disk_label;
	m->blocks_nonempty = 0;

	for (unsigned i = 0; i < s->nr_blocks; i++)
		m->blocks_nonempty += !!stripe_blockcount_get(s, i);
}
383 int bch2_trigger_stripe(struct btree_trans
*trans
,
384 enum btree_id btree
, unsigned level
,
385 struct bkey_s_c old
, struct bkey_s _new
,
386 enum btree_iter_update_trigger_flags flags
)
388 struct bkey_s_c
new = _new
.s_c
;
389 struct bch_fs
*c
= trans
->c
;
390 u64 idx
= new.k
->p
.offset
;
391 const struct bch_stripe
*old_s
= old
.k
->type
== KEY_TYPE_stripe
392 ? bkey_s_c_to_stripe(old
).v
: NULL
;
393 const struct bch_stripe
*new_s
= new.k
->type
== KEY_TYPE_stripe
394 ? bkey_s_c_to_stripe(new).v
: NULL
;
396 if (unlikely(flags
& BTREE_TRIGGER_check_repair
))
397 return bch2_check_fix_ptrs(trans
, btree
, level
, _new
.s_c
, flags
);
399 BUG_ON(new_s
&& old_s
&&
400 (new_s
->nr_blocks
!= old_s
->nr_blocks
||
401 new_s
->nr_redundant
!= old_s
->nr_redundant
));
404 if (flags
& (BTREE_TRIGGER_transactional
|BTREE_TRIGGER_gc
)) {
406 * If the pointers aren't changing, we don't need to do anything:
408 if (new_s
&& old_s
&&
409 new_s
->nr_blocks
== old_s
->nr_blocks
&&
410 new_s
->nr_redundant
== old_s
->nr_redundant
&&
411 !memcmp(old_s
->ptrs
, new_s
->ptrs
,
412 new_s
->nr_blocks
* sizeof(struct bch_extent_ptr
)))
415 struct gc_stripe
*gc
= NULL
;
416 if (flags
& BTREE_TRIGGER_gc
) {
417 gc
= genradix_ptr_alloc(&c
->gc_stripes
, idx
, GFP_KERNEL
);
419 bch_err(c
, "error allocating memory for gc_stripes, idx %llu", idx
);
420 return -BCH_ERR_ENOMEM_mark_stripe
;
424 * This will be wrong when we bring back runtime gc: we should
425 * be unmarking the old key and then marking the new key
427 * Also: when we bring back runtime gc, locking
430 gc
->sectors
= le16_to_cpu(new_s
->sectors
);
431 gc
->nr_blocks
= new_s
->nr_blocks
;
432 gc
->nr_redundant
= new_s
->nr_redundant
;
434 for (unsigned i
= 0; i
< new_s
->nr_blocks
; i
++)
435 gc
->ptrs
[i
] = new_s
->ptrs
[i
];
438 * gc recalculates this field from stripe ptr
441 memset(gc
->block_sectors
, 0, sizeof(gc
->block_sectors
));
445 s64 sectors
= (u64
) le16_to_cpu(new_s
->sectors
) * new_s
->nr_redundant
;
447 struct disk_accounting_pos acc
= {
448 .type
= BCH_DISK_ACCOUNTING_replicas
,
450 bch2_bkey_to_replicas(&acc
.replicas
, new);
451 int ret
= bch2_disk_accounting_mod(trans
, &acc
, §ors
, 1, gc
);
456 memcpy(&gc
->r
.e
, &acc
.replicas
, replicas_entry_bytes(&acc
.replicas
));
460 s64 sectors
= -((s64
) le16_to_cpu(old_s
->sectors
)) * old_s
->nr_redundant
;
462 struct disk_accounting_pos acc
= {
463 .type
= BCH_DISK_ACCOUNTING_replicas
,
465 bch2_bkey_to_replicas(&acc
.replicas
, old
);
466 int ret
= bch2_disk_accounting_mod(trans
, &acc
, §ors
, 1, gc
);
471 int ret
= mark_stripe_buckets(trans
, old
, new, flags
);
476 if (flags
& BTREE_TRIGGER_atomic
) {
477 struct stripe
*m
= genradix_ptr(&c
->stripes
, idx
);
480 struct printbuf buf1
= PRINTBUF
;
481 struct printbuf buf2
= PRINTBUF
;
483 bch2_bkey_val_to_text(&buf1
, c
, old
);
484 bch2_bkey_val_to_text(&buf2
, c
, new);
485 bch_err_ratelimited(c
, "error marking nonexistent stripe %llu while marking\n"
487 "new %s", idx
, buf1
.buf
, buf2
.buf
);
488 printbuf_exit(&buf2
);
489 printbuf_exit(&buf1
);
490 bch2_inconsistent_error(c
);
495 bch2_stripes_heap_del(c
, m
, idx
);
497 memset(m
, 0, sizeof(*m
));
499 stripe_to_mem(m
, new_s
);
502 bch2_stripes_heap_insert(c
, m
, idx
);
504 bch2_stripes_heap_update(c
, m
, idx
);
511 /* returns blocknr in stripe that we matched: */
static const struct bch_extent_ptr *bkey_matches_stripe(struct bch_stripe *s,
						struct bkey_s_c k, unsigned *block)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	unsigned i, nr_data = s->nr_blocks - s->nr_redundant;

	bkey_for_each_ptr(ptrs, ptr)
		for (i = 0; i < nr_data; i++)
			if (__bch2_ptr_matches_stripe(&s->ptrs[i], ptr,
						      le16_to_cpu(s->sectors))) {
				*block = i;
				return ptr;
			}

	return NULL;
}
static bool extent_has_stripe_ptr(struct bkey_s_c k, u64 idx)
{
	switch (k.k->type) {
	case KEY_TYPE_extent: {
		struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
		const union bch_extent_entry *entry;

		extent_for_each_entry(e, entry)
			if (extent_entry_type(entry) ==
			    BCH_EXTENT_ENTRY_stripe_ptr &&
			    entry->stripe_ptr.idx == idx)
				return true;

		break;
	}
	}

	return false;
}
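/*
 * Stripe bufs: in-memory buffers holding a stripe's data blocks, used for
 * reconstruct reads and when building new stripes:
 */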
static void ec_stripe_buf_exit(struct ec_stripe_buf *buf)
{
	if (buf->key.k.type == KEY_TYPE_stripe) {
		struct bkey_i_stripe *s = bkey_i_to_stripe(&buf->key);
		unsigned i;

		for (i = 0; i < s->v.nr_blocks; i++) {
			kvfree(buf->data[i]);
			buf->data[i] = NULL;
		}
	}
}
564 /* XXX: this is a non-mempoolified memory allocation: */
565 static int ec_stripe_buf_init(struct ec_stripe_buf
*buf
,
566 unsigned offset
, unsigned size
)
568 struct bch_stripe
*v
= &bkey_i_to_stripe(&buf
->key
)->v
;
569 unsigned csum_granularity
= 1U << v
->csum_granularity_bits
;
570 unsigned end
= offset
+ size
;
573 BUG_ON(end
> le16_to_cpu(v
->sectors
));
575 offset
= round_down(offset
, csum_granularity
);
576 end
= min_t(unsigned, le16_to_cpu(v
->sectors
),
577 round_up(end
, csum_granularity
));
579 buf
->offset
= offset
;
580 buf
->size
= end
- offset
;
582 memset(buf
->valid
, 0xFF, sizeof(buf
->valid
));
584 for (i
= 0; i
< v
->nr_blocks
; i
++) {
585 buf
->data
[i
] = kvmalloc(buf
->size
<< 9, GFP_KERNEL
);
592 ec_stripe_buf_exit(buf
);
593 return -BCH_ERR_ENOMEM_stripe_buf
;
598 static struct bch_csum
ec_block_checksum(struct ec_stripe_buf
*buf
,
599 unsigned block
, unsigned offset
)
601 struct bch_stripe
*v
= &bkey_i_to_stripe(&buf
->key
)->v
;
602 unsigned csum_granularity
= 1 << v
->csum_granularity_bits
;
603 unsigned end
= buf
->offset
+ buf
->size
;
604 unsigned len
= min(csum_granularity
, end
- offset
);
606 BUG_ON(offset
>= end
);
607 BUG_ON(offset
< buf
->offset
);
608 BUG_ON(offset
& (csum_granularity
- 1));
609 BUG_ON(offset
+ len
!= le16_to_cpu(v
->sectors
) &&
610 (len
& (csum_granularity
- 1)));
612 return bch2_checksum(NULL
, v
->csum_type
,
614 buf
->data
[block
] + ((offset
- buf
->offset
) << 9),
618 static void ec_generate_checksums(struct ec_stripe_buf
*buf
)
620 struct bch_stripe
*v
= &bkey_i_to_stripe(&buf
->key
)->v
;
621 unsigned i
, j
, csums_per_device
= stripe_csums_per_device(v
);
627 BUG_ON(buf
->size
!= le16_to_cpu(v
->sectors
));
629 for (i
= 0; i
< v
->nr_blocks
; i
++)
630 for (j
= 0; j
< csums_per_device
; j
++)
631 stripe_csum_set(v
, i
, j
,
632 ec_block_checksum(buf
, i
, j
<< v
->csum_granularity_bits
));
635 static void ec_validate_checksums(struct bch_fs
*c
, struct ec_stripe_buf
*buf
)
637 struct bch_stripe
*v
= &bkey_i_to_stripe(&buf
->key
)->v
;
638 unsigned csum_granularity
= 1 << v
->csum_granularity_bits
;
644 for (i
= 0; i
< v
->nr_blocks
; i
++) {
645 unsigned offset
= buf
->offset
;
646 unsigned end
= buf
->offset
+ buf
->size
;
648 if (!test_bit(i
, buf
->valid
))
651 while (offset
< end
) {
652 unsigned j
= offset
>> v
->csum_granularity_bits
;
653 unsigned len
= min(csum_granularity
, end
- offset
);
654 struct bch_csum want
= stripe_csum_get(v
, i
, j
);
655 struct bch_csum got
= ec_block_checksum(buf
, i
, offset
);
657 if (bch2_crc_cmp(want
, got
)) {
658 struct bch_dev
*ca
= bch2_dev_tryget(c
, v
->ptrs
[i
].dev
);
660 struct printbuf err
= PRINTBUF
;
662 prt_str(&err
, "stripe ");
663 bch2_csum_err_msg(&err
, v
->csum_type
, want
, got
);
664 prt_printf(&err
, " for %ps at %u of\n ", (void *) _RET_IP_
, i
);
665 bch2_bkey_val_to_text(&err
, c
, bkey_i_to_s_c(&buf
->key
));
666 bch_err_ratelimited(ca
, "%s", err
.buf
);
669 bch2_io_error(ca
, BCH_MEMBER_ERROR_checksum
);
672 clear_bit(i
, buf
->valid
);
681 /* Erasure coding: */
static void ec_generate_ec(struct ec_stripe_buf *buf)
{
	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
	unsigned nr_data = v->nr_blocks - v->nr_redundant;
	unsigned bytes = le16_to_cpu(v->sectors) << 9;

	raid_gen(nr_data, v->nr_redundant, bytes, buf->data);
}

static unsigned ec_nr_failed(struct ec_stripe_buf *buf)
{
	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;

	return v->nr_blocks - bitmap_weight(buf->valid, v->nr_blocks);
}

static int ec_do_recov(struct bch_fs *c, struct ec_stripe_buf *buf)
{
	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
	unsigned i, failed[BCH_BKEY_PTRS_MAX], nr_failed = 0;
	unsigned nr_data = v->nr_blocks - v->nr_redundant;
	unsigned bytes = buf->size << 9;

	if (ec_nr_failed(buf) > v->nr_redundant) {
		bch_err_ratelimited(c,
			"error doing reconstruct read: unable to read enough blocks");
		return -1;
	}

	for (i = 0; i < nr_data; i++)
		if (!test_bit(i, buf->valid))
			failed[nr_failed++] = i;

	raid_rec(nr_failed, failed, nr_data, v->nr_redundant, bytes, buf->data);

	return 0;
}
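/* IO path for reading and writing individual stripe blocks: */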
722 static void ec_block_endio(struct bio
*bio
)
724 struct ec_bio
*ec_bio
= container_of(bio
, struct ec_bio
, bio
);
725 struct bch_stripe
*v
= &bkey_i_to_stripe(&ec_bio
->buf
->key
)->v
;
726 struct bch_extent_ptr
*ptr
= &v
->ptrs
[ec_bio
->idx
];
727 struct bch_dev
*ca
= ec_bio
->ca
;
728 struct closure
*cl
= bio
->bi_private
;
730 if (bch2_dev_io_err_on(bio
->bi_status
, ca
,
732 ? BCH_MEMBER_ERROR_write
733 : BCH_MEMBER_ERROR_read
,
734 "erasure coding %s error: %s",
735 bio_data_dir(bio
) ? "write" : "read",
736 bch2_blk_status_to_str(bio
->bi_status
)))
737 clear_bit(ec_bio
->idx
, ec_bio
->buf
->valid
);
739 int stale
= dev_ptr_stale(ca
, ptr
);
741 bch_err_ratelimited(ca
->fs
,
742 "error %s stripe: stale/invalid pointer (%i) after io",
743 bio_data_dir(bio
) == READ
? "reading from" : "writing to",
745 clear_bit(ec_bio
->idx
, ec_bio
->buf
->valid
);
748 bio_put(&ec_bio
->bio
);
749 percpu_ref_put(&ca
->io_ref
);
753 static void ec_block_io(struct bch_fs
*c
, struct ec_stripe_buf
*buf
,
754 blk_opf_t opf
, unsigned idx
, struct closure
*cl
)
756 struct bch_stripe
*v
= &bkey_i_to_stripe(&buf
->key
)->v
;
757 unsigned offset
= 0, bytes
= buf
->size
<< 9;
758 struct bch_extent_ptr
*ptr
= &v
->ptrs
[idx
];
759 enum bch_data_type data_type
= idx
< v
->nr_blocks
- v
->nr_redundant
762 int rw
= op_is_write(opf
);
764 struct bch_dev
*ca
= bch2_dev_get_ioref(c
, ptr
->dev
, rw
);
766 clear_bit(idx
, buf
->valid
);
770 int stale
= dev_ptr_stale(ca
, ptr
);
772 bch_err_ratelimited(c
,
773 "error %s stripe: stale pointer (%i)",
774 rw
== READ
? "reading from" : "writing to",
776 clear_bit(idx
, buf
->valid
);
781 this_cpu_add(ca
->io_done
->sectors
[rw
][data_type
], buf
->size
);
783 while (offset
< bytes
) {
784 unsigned nr_iovecs
= min_t(size_t, BIO_MAX_VECS
,
785 DIV_ROUND_UP(bytes
, PAGE_SIZE
));
786 unsigned b
= min_t(size_t, bytes
- offset
,
787 nr_iovecs
<< PAGE_SHIFT
);
788 struct ec_bio
*ec_bio
;
790 ec_bio
= container_of(bio_alloc_bioset(ca
->disk_sb
.bdev
,
801 ec_bio
->bio
.bi_iter
.bi_sector
= ptr
->offset
+ buf
->offset
+ (offset
>> 9);
802 ec_bio
->bio
.bi_end_io
= ec_block_endio
;
803 ec_bio
->bio
.bi_private
= cl
;
805 bch2_bio_map(&ec_bio
->bio
, buf
->data
[idx
] + offset
, b
);
808 percpu_ref_get(&ca
->io_ref
);
810 submit_bio(&ec_bio
->bio
);
815 percpu_ref_put(&ca
->io_ref
);
818 static int get_stripe_key_trans(struct btree_trans
*trans
, u64 idx
,
819 struct ec_stripe_buf
*stripe
)
821 struct btree_iter iter
;
825 k
= bch2_bkey_get_iter(trans
, &iter
, BTREE_ID_stripes
,
826 POS(0, idx
), BTREE_ITER_slots
);
830 if (k
.k
->type
!= KEY_TYPE_stripe
) {
834 bkey_reassemble(&stripe
->key
, k
);
836 bch2_trans_iter_exit(trans
, &iter
);
840 /* recovery read path: */
841 int bch2_ec_read_extent(struct btree_trans
*trans
, struct bch_read_bio
*rbio
,
842 struct bkey_s_c orig_k
)
844 struct bch_fs
*c
= trans
->c
;
845 struct ec_stripe_buf
*buf
= NULL
;
847 struct bch_stripe
*v
;
849 const char *msg
= NULL
;
850 struct printbuf msgbuf
= PRINTBUF
;
853 closure_init_stack(&cl
);
855 BUG_ON(!rbio
->pick
.has_ec
);
857 buf
= kzalloc(sizeof(*buf
), GFP_NOFS
);
859 return -BCH_ERR_ENOMEM_ec_read_extent
;
861 ret
= lockrestart_do(trans
, get_stripe_key_trans(trans
, rbio
->pick
.ec
.idx
, buf
));
863 msg
= "stripe not found";
867 v
= &bkey_i_to_stripe(&buf
->key
)->v
;
869 if (!bch2_ptr_matches_stripe(v
, rbio
->pick
)) {
870 msg
= "pointer doesn't match stripe";
874 offset
= rbio
->bio
.bi_iter
.bi_sector
- v
->ptrs
[rbio
->pick
.ec
.block
].offset
;
875 if (offset
+ bio_sectors(&rbio
->bio
) > le16_to_cpu(v
->sectors
)) {
876 msg
= "read is bigger than stripe";
880 ret
= ec_stripe_buf_init(buf
, offset
, bio_sectors(&rbio
->bio
));
886 for (i
= 0; i
< v
->nr_blocks
; i
++)
887 ec_block_io(c
, buf
, REQ_OP_READ
, i
, &cl
);
891 if (ec_nr_failed(buf
) > v
->nr_redundant
) {
892 msg
= "unable to read enough blocks";
896 ec_validate_checksums(c
, buf
);
898 ret
= ec_do_recov(c
, buf
);
902 memcpy_to_bio(&rbio
->bio
, rbio
->bio
.bi_iter
,
903 buf
->data
[rbio
->pick
.ec
.block
] + ((offset
- buf
->offset
) << 9));
905 ec_stripe_buf_exit(buf
);
909 bch2_bkey_val_to_text(&msgbuf
, c
, orig_k
);
910 bch_err_ratelimited(c
,
911 "error doing reconstruct read: %s\n %s", msg
, msgbuf
.buf
);
printbuf_exit(&msgbuf);
913 ret
= -BCH_ERR_stripe_reconstruct
;
917 /* stripe bucket accounting: */
919 static int __ec_stripe_mem_alloc(struct bch_fs
*c
, size_t idx
, gfp_t gfp
)
921 ec_stripes_heap n
, *h
= &c
->ec_stripes_heap
;
923 if (idx
>= h
->size
) {
924 if (!init_heap(&n
, max(1024UL, roundup_pow_of_two(idx
+ 1)), gfp
))
925 return -BCH_ERR_ENOMEM_ec_stripe_mem_alloc
;
927 mutex_lock(&c
->ec_stripes_heap_lock
);
928 if (n
.size
> h
->size
) {
929 memcpy(n
.data
, h
->data
, h
->nr
* sizeof(h
->data
[0]));
933 mutex_unlock(&c
->ec_stripes_heap_lock
);
938 if (!genradix_ptr_alloc(&c
->stripes
, idx
, gfp
))
939 return -BCH_ERR_ENOMEM_ec_stripe_mem_alloc
;
941 if (c
->gc_pos
.phase
!= GC_PHASE_not_running
&&
942 !genradix_ptr_alloc(&c
->gc_stripes
, idx
, gfp
))
943 return -BCH_ERR_ENOMEM_ec_stripe_mem_alloc
;
948 static int ec_stripe_mem_alloc(struct btree_trans
*trans
,
949 struct btree_iter
*iter
)
951 return allocate_dropping_locks_errcode(trans
,
952 __ec_stripe_mem_alloc(trans
->c
, iter
->pos
.offset
, _gfp
));
/*
 * Hash table of open stripes:
 * Stripes that are being created or modified are kept in a hash table, so that
 * stripe deletion can skip them.
 */
961 static bool __bch2_stripe_is_open(struct bch_fs
*c
, u64 idx
)
963 unsigned hash
= hash_64(idx
, ilog2(ARRAY_SIZE(c
->ec_stripes_new
)));
964 struct ec_stripe_new
*s
;
966 hlist_for_each_entry(s
, &c
->ec_stripes_new
[hash
], hash
)
972 static bool bch2_stripe_is_open(struct bch_fs
*c
, u64 idx
)
976 spin_lock(&c
->ec_stripes_new_lock
);
977 ret
= __bch2_stripe_is_open(c
, idx
);
978 spin_unlock(&c
->ec_stripes_new_lock
);
983 static bool bch2_try_open_stripe(struct bch_fs
*c
,
984 struct ec_stripe_new
*s
,
989 spin_lock(&c
->ec_stripes_new_lock
);
990 ret
= !__bch2_stripe_is_open(c
, idx
);
992 unsigned hash
= hash_64(idx
, ilog2(ARRAY_SIZE(c
->ec_stripes_new
)));
995 hlist_add_head(&s
->hash
, &c
->ec_stripes_new
[hash
]);
997 spin_unlock(&c
->ec_stripes_new_lock
);
1002 static void bch2_stripe_close(struct bch_fs
*c
, struct ec_stripe_new
*s
)
1006 spin_lock(&c
->ec_stripes_new_lock
);
1007 hlist_del_init(&s
->hash
);
1008 spin_unlock(&c
->ec_stripes_new_lock
);
1013 /* Heap of all existing stripes, ordered by blocks_nonempty */
1015 static u64
stripe_idx_to_delete(struct bch_fs
*c
)
1017 ec_stripes_heap
*h
= &c
->ec_stripes_heap
;
1019 lockdep_assert_held(&c
->ec_stripes_heap_lock
);
1022 h
->data
[0].blocks_nonempty
== 0 &&
1023 !bch2_stripe_is_open(c
, h
->data
[0].idx
))
1024 return h
->data
[0].idx
;
1029 static inline void ec_stripes_heap_set_backpointer(ec_stripes_heap
*h
,
1032 struct bch_fs
*c
= container_of(h
, struct bch_fs
, ec_stripes_heap
);
1034 genradix_ptr(&c
->stripes
, h
->data
[i
].idx
)->heap_idx
= i
;
1037 static inline bool ec_stripes_heap_cmp(const void *l
, const void *r
, void __always_unused
*args
)
1039 struct ec_stripe_heap_entry
*_l
= (struct ec_stripe_heap_entry
*)l
;
1040 struct ec_stripe_heap_entry
*_r
= (struct ec_stripe_heap_entry
*)r
;
1042 return ((_l
->blocks_nonempty
> _r
->blocks_nonempty
) <
1043 (_l
->blocks_nonempty
< _r
->blocks_nonempty
));
1046 static inline void ec_stripes_heap_swap(void *l
, void *r
, void *h
)
1048 struct ec_stripe_heap_entry
*_l
= (struct ec_stripe_heap_entry
*)l
;
1049 struct ec_stripe_heap_entry
*_r
= (struct ec_stripe_heap_entry
*)r
;
1050 ec_stripes_heap
*_h
= (ec_stripes_heap
*)h
;
1051 size_t i
= _l
- _h
->data
;
1052 size_t j
= _r
- _h
->data
;
1056 ec_stripes_heap_set_backpointer(_h
, i
);
1057 ec_stripes_heap_set_backpointer(_h
, j
);
1060 static const struct min_heap_callbacks callbacks
= {
1061 .less
= ec_stripes_heap_cmp
,
1062 .swp
= ec_stripes_heap_swap
,
1065 static void heap_verify_backpointer(struct bch_fs
*c
, size_t idx
)
1067 ec_stripes_heap
*h
= &c
->ec_stripes_heap
;
1068 struct stripe
*m
= genradix_ptr(&c
->stripes
, idx
);
1070 BUG_ON(m
->heap_idx
>= h
->nr
);
1071 BUG_ON(h
->data
[m
->heap_idx
].idx
!= idx
);
1074 void bch2_stripes_heap_del(struct bch_fs
*c
,
1075 struct stripe
*m
, size_t idx
)
1077 mutex_lock(&c
->ec_stripes_heap_lock
);
1078 heap_verify_backpointer(c
, idx
);
1080 min_heap_del(&c
->ec_stripes_heap
, m
->heap_idx
, &callbacks
, &c
->ec_stripes_heap
);
1081 mutex_unlock(&c
->ec_stripes_heap_lock
);
1084 void bch2_stripes_heap_insert(struct bch_fs
*c
,
1085 struct stripe
*m
, size_t idx
)
1087 mutex_lock(&c
->ec_stripes_heap_lock
);
1088 BUG_ON(min_heap_full(&c
->ec_stripes_heap
));
1090 genradix_ptr(&c
->stripes
, idx
)->heap_idx
= c
->ec_stripes_heap
.nr
;
1091 min_heap_push(&c
->ec_stripes_heap
, &((struct ec_stripe_heap_entry
) {
1093 .blocks_nonempty
= m
->blocks_nonempty
,
1096 &c
->ec_stripes_heap
);
1098 heap_verify_backpointer(c
, idx
);
1099 mutex_unlock(&c
->ec_stripes_heap_lock
);
1102 void bch2_stripes_heap_update(struct bch_fs
*c
,
1103 struct stripe
*m
, size_t idx
)
1105 ec_stripes_heap
*h
= &c
->ec_stripes_heap
;
1109 mutex_lock(&c
->ec_stripes_heap_lock
);
1110 heap_verify_backpointer(c
, idx
);
1112 h
->data
[m
->heap_idx
].blocks_nonempty
= m
->blocks_nonempty
;
1115 min_heap_sift_up(h
, i
, &callbacks
, &c
->ec_stripes_heap
);
1116 min_heap_sift_down(h
, i
, &callbacks
, &c
->ec_stripes_heap
);
1118 heap_verify_backpointer(c
, idx
);
1120 do_deletes
= stripe_idx_to_delete(c
) != 0;
1121 mutex_unlock(&c
->ec_stripes_heap_lock
);
1124 bch2_do_stripe_deletes(c
);
1127 /* stripe deletion */
1129 static int ec_stripe_delete(struct btree_trans
*trans
, u64 idx
)
1131 struct bch_fs
*c
= trans
->c
;
1132 struct btree_iter iter
;
1134 struct bkey_s_c_stripe s
;
1137 k
= bch2_bkey_get_iter(trans
, &iter
, BTREE_ID_stripes
, POS(0, idx
),
1143 if (k
.k
->type
!= KEY_TYPE_stripe
) {
1144 bch2_fs_inconsistent(c
, "attempting to delete nonexistent stripe %llu", idx
);
1149 s
= bkey_s_c_to_stripe(k
);
1150 for (unsigned i
= 0; i
< s
.v
->nr_blocks
; i
++)
1151 if (stripe_blockcount_get(s
.v
, i
)) {
1152 struct printbuf buf
= PRINTBUF
;
1154 bch2_bkey_val_to_text(&buf
, c
, k
);
1155 bch2_fs_inconsistent(c
, "attempting to delete nonempty stripe %s", buf
.buf
);
1156 printbuf_exit(&buf
);
1161 ret
= bch2_btree_delete_at(trans
, &iter
, 0);
1163 bch2_trans_iter_exit(trans
, &iter
);
1167 static void ec_stripe_delete_work(struct work_struct
*work
)
1170 container_of(work
, struct bch_fs
, ec_stripe_delete_work
);
1173 mutex_lock(&c
->ec_stripes_heap_lock
);
1174 u64 idx
= stripe_idx_to_delete(c
);
1175 mutex_unlock(&c
->ec_stripes_heap_lock
);
1180 int ret
= bch2_trans_commit_do(c
, NULL
, NULL
, BCH_TRANS_COMMIT_no_enospc
,
1181 ec_stripe_delete(trans
, idx
));
1187 bch2_write_ref_put(c
, BCH_WRITE_REF_stripe_delete
);
1190 void bch2_do_stripe_deletes(struct bch_fs
*c
)
1192 if (bch2_write_ref_tryget(c
, BCH_WRITE_REF_stripe_delete
) &&
1193 !queue_work(c
->write_ref_wq
, &c
->ec_stripe_delete_work
))
1194 bch2_write_ref_put(c
, BCH_WRITE_REF_stripe_delete
);
1197 /* stripe creation: */
1199 static int ec_stripe_key_update(struct btree_trans
*trans
,
1200 struct bkey_i_stripe
*old
,
1201 struct bkey_i_stripe
*new)
1203 struct bch_fs
*c
= trans
->c
;
1206 struct btree_iter iter
;
1207 struct bkey_s_c k
= bch2_bkey_get_iter(trans
, &iter
, BTREE_ID_stripes
,
1208 new->k
.p
, BTREE_ITER_intent
);
1209 int ret
= bkey_err(k
);
1213 if (bch2_fs_inconsistent_on(k
.k
->type
!= (create
? KEY_TYPE_deleted
: KEY_TYPE_stripe
),
1214 c
, "error %s stripe: got existing key type %s",
1215 create
? "creating" : "updating",
1216 bch2_bkey_types
[k
.k
->type
])) {
1221 if (k
.k
->type
== KEY_TYPE_stripe
) {
1222 const struct bch_stripe
*v
= bkey_s_c_to_stripe(k
).v
;
1224 BUG_ON(old
->v
.nr_blocks
!= new->v
.nr_blocks
);
1225 BUG_ON(old
->v
.nr_blocks
!= v
->nr_blocks
);
1227 for (unsigned i
= 0; i
< new->v
.nr_blocks
; i
++) {
1228 unsigned sectors
= stripe_blockcount_get(v
, i
);
1230 if (!bch2_extent_ptr_eq(old
->v
.ptrs
[i
], new->v
.ptrs
[i
]) && sectors
) {
1231 struct printbuf buf
= PRINTBUF
;
1233 prt_printf(&buf
, "stripe changed nonempty block %u", i
);
1234 prt_str(&buf
, "\nold: ");
1235 bch2_bkey_val_to_text(&buf
, c
, k
);
1236 prt_str(&buf
, "\nnew: ");
1237 bch2_bkey_val_to_text(&buf
, c
, bkey_i_to_s_c(&new->k_i
));
1238 bch2_fs_inconsistent(c
, "%s", buf
.buf
);
1239 printbuf_exit(&buf
);
1245 * If the stripe ptr changed underneath us, it must have
1246 * been dev_remove_stripes() -> * invalidate_stripe_to_dev()
1248 if (!bch2_extent_ptr_eq(old
->v
.ptrs
[i
], v
->ptrs
[i
])) {
1249 BUG_ON(v
->ptrs
[i
].dev
!= BCH_SB_MEMBER_INVALID
);
1251 if (bch2_extent_ptr_eq(old
->v
.ptrs
[i
], new->v
.ptrs
[i
]))
1252 new->v
.ptrs
[i
].dev
= BCH_SB_MEMBER_INVALID
;
1255 stripe_blockcount_set(&new->v
, i
, sectors
);
1259 ret
= bch2_trans_update(trans
, &iter
, &new->k_i
, 0);
1261 bch2_trans_iter_exit(trans
, &iter
);
1265 static int ec_stripe_update_extent(struct btree_trans
*trans
,
1267 struct bpos bucket
, u8 gen
,
1268 struct ec_stripe_buf
*s
,
1269 struct bpos
*bp_pos
)
1271 struct bch_stripe
*v
= &bkey_i_to_stripe(&s
->key
)->v
;
1272 struct bch_fs
*c
= trans
->c
;
1273 struct bch_backpointer bp
;
1274 struct btree_iter iter
;
1276 const struct bch_extent_ptr
*ptr_c
;
1277 struct bch_extent_ptr
*ec_ptr
= NULL
;
1278 struct bch_extent_stripe_ptr stripe_ptr
;
1280 int ret
, dev
, block
;
1282 ret
= bch2_get_next_backpointer(trans
, ca
, bucket
, gen
,
1283 bp_pos
, &bp
, BTREE_ITER_cached
);
1286 if (bpos_eq(*bp_pos
, SPOS_MAX
))
1290 struct printbuf buf
= PRINTBUF
;
1291 struct btree_iter node_iter
;
1294 b
= bch2_backpointer_get_node(trans
, &node_iter
, *bp_pos
, bp
);
1295 bch2_trans_iter_exit(trans
, &node_iter
);
1300 prt_printf(&buf
, "found btree node in erasure coded bucket: b=%px\n", b
);
1301 bch2_backpointer_to_text(&buf
, &bp
);
1303 bch2_fs_inconsistent(c
, "%s", buf
.buf
);
1304 printbuf_exit(&buf
);
1308 k
= bch2_backpointer_get_key(trans
, &iter
, *bp_pos
, bp
, BTREE_ITER_intent
);
1314 * extent no longer exists - we could flush the btree
1315 * write buffer and retry to verify, but no need:
1320 if (extent_has_stripe_ptr(k
, s
->key
.k
.p
.offset
))
1323 ptr_c
= bkey_matches_stripe(v
, k
, &block
);
1325 * It doesn't generally make sense to erasure code cached ptrs:
1326 * XXX: should we be incrementing a counter?
1328 if (!ptr_c
|| ptr_c
->cached
)
1331 dev
= v
->ptrs
[block
].dev
;
1333 n
= bch2_trans_kmalloc(trans
, bkey_bytes(k
.k
) + sizeof(stripe_ptr
));
1334 ret
= PTR_ERR_OR_ZERO(n
);
1338 bkey_reassemble(n
, k
);
1340 bch2_bkey_drop_ptrs_noerror(bkey_i_to_s(n
), ptr
, ptr
->dev
!= dev
);
1341 ec_ptr
= bch2_bkey_has_device(bkey_i_to_s(n
), dev
);
1344 stripe_ptr
= (struct bch_extent_stripe_ptr
) {
1345 .type
= 1 << BCH_EXTENT_ENTRY_stripe_ptr
,
1347 .redundancy
= v
->nr_redundant
,
1348 .idx
= s
->key
.k
.p
.offset
,
1351 __extent_entry_insert(n
,
1352 (union bch_extent_entry
*) ec_ptr
,
1353 (union bch_extent_entry
*) &stripe_ptr
);
1355 ret
= bch2_trans_update(trans
, &iter
, n
, 0);
1357 bch2_trans_iter_exit(trans
, &iter
);
1361 static int ec_stripe_update_bucket(struct btree_trans
*trans
, struct ec_stripe_buf
*s
,
1364 struct bch_fs
*c
= trans
->c
;
1365 struct bch_stripe
*v
= &bkey_i_to_stripe(&s
->key
)->v
;
1366 struct bch_extent_ptr ptr
= v
->ptrs
[block
];
1367 struct bpos bp_pos
= POS_MIN
;
1370 struct bch_dev
*ca
= bch2_dev_tryget(c
, ptr
.dev
);
1374 struct bpos bucket_pos
= PTR_BUCKET_POS(ca
, &ptr
);
1377 ret
= commit_do(trans
, NULL
, NULL
,
1378 BCH_TRANS_COMMIT_no_check_rw
|
1379 BCH_TRANS_COMMIT_no_enospc
,
1380 ec_stripe_update_extent(trans
, ca
, bucket_pos
, ptr
.gen
, s
, &bp_pos
));
1383 if (bkey_eq(bp_pos
, POS_MAX
))
1386 bp_pos
= bpos_nosnap_successor(bp_pos
);
1393 static int ec_stripe_update_extents(struct bch_fs
*c
, struct ec_stripe_buf
*s
)
1395 struct btree_trans
*trans
= bch2_trans_get(c
);
1396 struct bch_stripe
*v
= &bkey_i_to_stripe(&s
->key
)->v
;
1397 unsigned i
, nr_data
= v
->nr_blocks
- v
->nr_redundant
;
1400 ret
= bch2_btree_write_buffer_flush_sync(trans
);
1404 for (i
= 0; i
< nr_data
; i
++) {
1405 ret
= ec_stripe_update_bucket(trans
, s
, i
);
1410 bch2_trans_put(trans
);
1415 static void zero_out_rest_of_ec_bucket(struct bch_fs
*c
,
1416 struct ec_stripe_new
*s
,
1418 struct open_bucket
*ob
)
1420 struct bch_dev
*ca
= bch2_dev_get_ioref(c
, ob
->dev
, WRITE
);
1422 s
->err
= -BCH_ERR_erofs_no_writes
;
1426 unsigned offset
= ca
->mi
.bucket_size
- ob
->sectors_free
;
1427 memset(s
->new_stripe
.data
[block
] + (offset
<< 9),
1429 ob
->sectors_free
<< 9);
1431 int ret
= blkdev_issue_zeroout(ca
->disk_sb
.bdev
,
1432 ob
->bucket
* ca
->mi
.bucket_size
+ offset
,
1436 percpu_ref_put(&ca
->io_ref
);
1442 void bch2_ec_stripe_new_free(struct bch_fs
*c
, struct ec_stripe_new
*s
)
1445 bch2_stripe_close(c
, s
);
1450 * data buckets of new stripe all written: create the stripe
1452 static void ec_stripe_create(struct ec_stripe_new
*s
)
1454 struct bch_fs
*c
= s
->c
;
1455 struct open_bucket
*ob
;
1456 struct bch_stripe
*v
= &bkey_i_to_stripe(&s
->new_stripe
.key
)->v
;
1457 unsigned i
, nr_data
= v
->nr_blocks
- v
->nr_redundant
;
1460 BUG_ON(s
->h
->s
== s
);
1462 closure_sync(&s
->iodone
);
1465 for (i
= 0; i
< nr_data
; i
++)
1467 ob
= c
->open_buckets
+ s
->blocks
[i
];
1469 if (ob
->sectors_free
)
1470 zero_out_rest_of_ec_bucket(c
, s
, i
, ob
);
1475 if (!bch2_err_matches(s
->err
, EROFS
))
1476 bch_err(c
, "error creating stripe: error writing data buckets");
1480 if (s
->have_existing_stripe
) {
1481 ec_validate_checksums(c
, &s
->existing_stripe
);
1483 if (ec_do_recov(c
, &s
->existing_stripe
)) {
1484 bch_err(c
, "error creating stripe: error reading existing stripe");
1488 for (i
= 0; i
< nr_data
; i
++)
1489 if (stripe_blockcount_get(&bkey_i_to_stripe(&s
->existing_stripe
.key
)->v
, i
))
1490 swap(s
->new_stripe
.data
[i
],
1491 s
->existing_stripe
.data
[i
]);
1493 ec_stripe_buf_exit(&s
->existing_stripe
);
1496 BUG_ON(!s
->allocated
);
1499 ec_generate_ec(&s
->new_stripe
);
1501 ec_generate_checksums(&s
->new_stripe
);
1504 for (i
= nr_data
; i
< v
->nr_blocks
; i
++)
1505 ec_block_io(c
, &s
->new_stripe
, REQ_OP_WRITE
, i
, &s
->iodone
);
1506 closure_sync(&s
->iodone
);
1508 if (ec_nr_failed(&s
->new_stripe
)) {
1509 bch_err(c
, "error creating stripe: error writing redundancy buckets");
1513 ret
= bch2_trans_commit_do(c
, &s
->res
, NULL
,
1514 BCH_TRANS_COMMIT_no_check_rw
|
1515 BCH_TRANS_COMMIT_no_enospc
,
1516 ec_stripe_key_update(trans
,
1517 s
->have_existing_stripe
1518 ? bkey_i_to_stripe(&s
->existing_stripe
.key
)
1520 bkey_i_to_stripe(&s
->new_stripe
.key
)));
1521 bch_err_msg(c
, ret
, "creating stripe key");
1526 ret
= ec_stripe_update_extents(c
, &s
->new_stripe
);
1527 bch_err_msg(c
, ret
, "error updating extents");
1531 bch2_disk_reservation_put(c
, &s
->res
);
1533 for (i
= 0; i
< v
->nr_blocks
; i
++)
1535 ob
= c
->open_buckets
+ s
->blocks
[i
];
1539 __bch2_open_bucket_put(c
, ob
);
1541 bch2_open_bucket_put(c
, ob
);
1545 mutex_lock(&c
->ec_stripe_new_lock
);
1547 mutex_unlock(&c
->ec_stripe_new_lock
);
1548 wake_up(&c
->ec_stripe_new_wait
);
1550 ec_stripe_buf_exit(&s
->existing_stripe
);
1551 ec_stripe_buf_exit(&s
->new_stripe
);
1552 closure_debug_destroy(&s
->iodone
);
1554 ec_stripe_new_put(c
, s
, STRIPE_REF_stripe
);
1557 static struct ec_stripe_new
*get_pending_stripe(struct bch_fs
*c
)
1559 struct ec_stripe_new
*s
;
1561 mutex_lock(&c
->ec_stripe_new_lock
);
1562 list_for_each_entry(s
, &c
->ec_stripe_new_list
, list
)
1563 if (!atomic_read(&s
->ref
[STRIPE_REF_io
]))
1567 mutex_unlock(&c
->ec_stripe_new_lock
);
1572 static void ec_stripe_create_work(struct work_struct
*work
)
1574 struct bch_fs
*c
= container_of(work
,
1575 struct bch_fs
, ec_stripe_create_work
);
1576 struct ec_stripe_new
*s
;
1578 while ((s
= get_pending_stripe(c
)))
1579 ec_stripe_create(s
);
1581 bch2_write_ref_put(c
, BCH_WRITE_REF_stripe_create
);
1584 void bch2_ec_do_stripe_creates(struct bch_fs
*c
)
1586 bch2_write_ref_get(c
, BCH_WRITE_REF_stripe_create
);
1588 if (!queue_work(system_long_wq
, &c
->ec_stripe_create_work
))
1589 bch2_write_ref_put(c
, BCH_WRITE_REF_stripe_create
);
1592 static void ec_stripe_new_set_pending(struct bch_fs
*c
, struct ec_stripe_head
*h
)
1594 struct ec_stripe_new
*s
= h
->s
;
1596 lockdep_assert_held(&h
->lock
);
1598 BUG_ON(!s
->allocated
&& !s
->err
);
1603 mutex_lock(&c
->ec_stripe_new_lock
);
1604 list_add(&s
->list
, &c
->ec_stripe_new_list
);
1605 mutex_unlock(&c
->ec_stripe_new_lock
);
1607 ec_stripe_new_put(c
, s
, STRIPE_REF_io
);
1610 static void ec_stripe_new_cancel(struct bch_fs
*c
, struct ec_stripe_head
*h
, int err
)
1613 ec_stripe_new_set_pending(c
, h
);
1616 void bch2_ec_bucket_cancel(struct bch_fs
*c
, struct open_bucket
*ob
)
1618 struct ec_stripe_new
*s
= ob
->ec
;
1623 void *bch2_writepoint_ec_buf(struct bch_fs
*c
, struct write_point
*wp
)
1625 struct open_bucket
*ob
= ec_open_bucket(c
, &wp
->ptrs
);
1629 BUG_ON(!ob
->ec
->new_stripe
.data
[ob
->ec_idx
]);
1631 struct bch_dev
*ca
= ob_dev(c
, ob
);
1632 unsigned offset
= ca
->mi
.bucket_size
- ob
->sectors_free
;
1634 return ob
->ec
->new_stripe
.data
[ob
->ec_idx
] + (offset
<< 9);
static int unsigned_cmp(const void *_l, const void *_r)
{
	unsigned l = *((const unsigned *) _l);
	unsigned r = *((const unsigned *) _r);

	return cmp_int(l, r);
}
1645 /* pick most common bucket size: */
1646 static unsigned pick_blocksize(struct bch_fs
*c
,
1647 struct bch_devs_mask
*devs
)
1649 unsigned nr
= 0, sizes
[BCH_SB_MEMBERS_MAX
];
1652 } cur
= { 0, 0 }, best
= { 0, 0 };
1654 for_each_member_device_rcu(c
, ca
, devs
)
1655 sizes
[nr
++] = ca
->mi
.bucket_size
;
1657 sort(sizes
, nr
, sizeof(unsigned), unsigned_cmp
, NULL
);
1659 for (unsigned i
= 0; i
< nr
; i
++) {
1660 if (sizes
[i
] != cur
.size
) {
1661 if (cur
.nr
> best
.nr
)
1665 cur
.size
= sizes
[i
];
1671 if (cur
.nr
> best
.nr
)
1677 static bool may_create_new_stripe(struct bch_fs
*c
)
1682 static void ec_stripe_key_init(struct bch_fs
*c
,
1686 unsigned stripe_size
,
1687 unsigned disk_label
)
1689 struct bkey_i_stripe
*s
= bkey_stripe_init(k
);
1692 s
->v
.sectors
= cpu_to_le16(stripe_size
);
1694 s
->v
.nr_blocks
= nr_data
+ nr_parity
;
1695 s
->v
.nr_redundant
= nr_parity
;
1696 s
->v
.csum_granularity_bits
= ilog2(c
->opts
.encoded_extent_max
>> 9);
1697 s
->v
.csum_type
= BCH_CSUM_crc32c
;
1698 s
->v
.disk_label
= disk_label
;
1700 while ((u64s
= stripe_val_u64s(&s
->v
)) > BKEY_VAL_U64s_MAX
) {
1701 BUG_ON(1 << s
->v
.csum_granularity_bits
>=
1702 le16_to_cpu(s
->v
.sectors
) ||
1703 s
->v
.csum_granularity_bits
== U8_MAX
);
1704 s
->v
.csum_granularity_bits
++;
1707 set_bkey_val_u64s(&s
->k
, u64s
);
1710 static int ec_new_stripe_alloc(struct bch_fs
*c
, struct ec_stripe_head
*h
)
1712 struct ec_stripe_new
*s
;
1714 lockdep_assert_held(&h
->lock
);
1716 s
= kzalloc(sizeof(*s
), GFP_KERNEL
);
1718 return -BCH_ERR_ENOMEM_ec_new_stripe_alloc
;
1720 mutex_init(&s
->lock
);
1721 closure_init(&s
->iodone
, NULL
);
1722 atomic_set(&s
->ref
[STRIPE_REF_stripe
], 1);
1723 atomic_set(&s
->ref
[STRIPE_REF_io
], 1);
1726 s
->nr_data
= min_t(unsigned, h
->nr_active_devs
,
1727 BCH_BKEY_PTRS_MAX
) - h
->redundancy
;
1728 s
->nr_parity
= h
->redundancy
;
1730 ec_stripe_key_init(c
, &s
->new_stripe
.key
,
1731 s
->nr_data
, s
->nr_parity
,
1732 h
->blocksize
, h
->disk_label
);
1739 static void ec_stripe_head_devs_update(struct bch_fs
*c
, struct ec_stripe_head
*h
)
1741 struct bch_devs_mask devs
= h
->devs
;
1744 h
->devs
= target_rw_devs(c
, BCH_DATA_user
, h
->disk_label
1745 ? group_to_target(h
->disk_label
- 1)
1747 unsigned nr_devs
= dev_mask_nr(&h
->devs
);
1749 for_each_member_device_rcu(c
, ca
, &h
->devs
)
1750 if (!ca
->mi
.durability
)
1751 __clear_bit(ca
->dev_idx
, h
->devs
.d
);
1752 unsigned nr_devs_with_durability
= dev_mask_nr(&h
->devs
);
1754 h
->blocksize
= pick_blocksize(c
, &h
->devs
);
1756 h
->nr_active_devs
= 0;
1757 for_each_member_device_rcu(c
, ca
, &h
->devs
)
1758 if (ca
->mi
.bucket_size
== h
->blocksize
)
1759 h
->nr_active_devs
++;
1764 * If we only have redundancy + 1 devices, we're better off with just
1767 h
->insufficient_devs
= h
->nr_active_devs
< h
->redundancy
+ 2;
1769 if (h
->insufficient_devs
) {
1772 if (nr_devs
< h
->redundancy
+ 2)
1774 else if (nr_devs_with_durability
< h
->redundancy
+ 2)
1775 err
= "cannot use durability=0 devices";
1777 err
= "mismatched bucket sizes";
1780 bch_err(c
, "insufficient devices available to create stripe (have %u, need %u): %s",
1781 h
->nr_active_devs
, h
->redundancy
+ 2, err
);
1784 struct bch_devs_mask devs_leaving
;
1785 bitmap_andnot(devs_leaving
.d
, devs
.d
, h
->devs
.d
, BCH_SB_MEMBERS_MAX
);
1787 if (h
->s
&& !h
->s
->allocated
&& dev_mask_nr(&devs_leaving
))
1788 ec_stripe_new_cancel(c
, h
, -EINTR
);
1790 h
->rw_devs_change_count
= c
->rw_devs_change_count
;
1793 static struct ec_stripe_head
*
1794 ec_new_stripe_head_alloc(struct bch_fs
*c
, unsigned disk_label
,
1795 unsigned algo
, unsigned redundancy
,
1796 enum bch_watermark watermark
)
1798 struct ec_stripe_head
*h
;
1800 h
= kzalloc(sizeof(*h
), GFP_KERNEL
);
1804 mutex_init(&h
->lock
);
1805 BUG_ON(!mutex_trylock(&h
->lock
));
1807 h
->disk_label
= disk_label
;
1809 h
->redundancy
= redundancy
;
1810 h
->watermark
= watermark
;
1812 list_add(&h
->list
, &c
->ec_stripe_head_list
);
1816 void bch2_ec_stripe_head_put(struct bch_fs
*c
, struct ec_stripe_head
*h
)
1820 bitmap_weight(h
->s
->blocks_allocated
,
1821 h
->s
->nr_data
) == h
->s
->nr_data
)
1822 ec_stripe_new_set_pending(c
, h
);
1824 mutex_unlock(&h
->lock
);
1827 static struct ec_stripe_head
*
1828 __bch2_ec_stripe_head_get(struct btree_trans
*trans
,
1829 unsigned disk_label
,
1831 unsigned redundancy
,
1832 enum bch_watermark watermark
)
1834 struct bch_fs
*c
= trans
->c
;
1835 struct ec_stripe_head
*h
;
1841 ret
= bch2_trans_mutex_lock(trans
, &c
->ec_stripe_head_lock
);
1843 return ERR_PTR(ret
);
1845 if (test_bit(BCH_FS_going_ro
, &c
->flags
)) {
1846 h
= ERR_PTR(-BCH_ERR_erofs_no_writes
);
1850 list_for_each_entry(h
, &c
->ec_stripe_head_list
, list
)
1851 if (h
->disk_label
== disk_label
&&
1853 h
->redundancy
== redundancy
&&
1854 h
->watermark
== watermark
) {
1855 ret
= bch2_trans_mutex_lock(trans
, &h
->lock
);
1863 h
= ec_new_stripe_head_alloc(c
, disk_label
, algo
, redundancy
, watermark
);
1865 h
= ERR_PTR(-BCH_ERR_ENOMEM_stripe_head_alloc
);
1869 if (h
->rw_devs_change_count
!= c
->rw_devs_change_count
)
1870 ec_stripe_head_devs_update(c
, h
);
1872 if (h
->insufficient_devs
) {
1873 mutex_unlock(&h
->lock
);
1877 mutex_unlock(&c
->ec_stripe_head_lock
);
1881 static int new_stripe_alloc_buckets(struct btree_trans
*trans
, struct ec_stripe_head
*h
,
1882 enum bch_watermark watermark
, struct closure
*cl
)
1884 struct bch_fs
*c
= trans
->c
;
1885 struct bch_devs_mask devs
= h
->devs
;
1886 struct open_bucket
*ob
;
1887 struct open_buckets buckets
;
1888 struct bch_stripe
*v
= &bkey_i_to_stripe(&h
->s
->new_stripe
.key
)->v
;
1889 unsigned i
, j
, nr_have_parity
= 0, nr_have_data
= 0;
1890 bool have_cache
= true;
1893 BUG_ON(v
->nr_blocks
!= h
->s
->nr_data
+ h
->s
->nr_parity
);
1894 BUG_ON(v
->nr_redundant
!= h
->s
->nr_parity
);
1896 /* * We bypass the sector allocator which normally does this: */
1897 bitmap_and(devs
.d
, devs
.d
, c
->rw_devs
[BCH_DATA_user
].d
, BCH_SB_MEMBERS_MAX
);
1899 for_each_set_bit(i
, h
->s
->blocks_gotten
, v
->nr_blocks
) {
1901 * Note: we don't yet repair invalid blocks (failed/removed
1902 * devices) when reusing stripes - we still need a codepath to
1903 * walk backpointers and update all extents that point to that
1904 * block when updating the stripe
1906 if (v
->ptrs
[i
].dev
!= BCH_SB_MEMBER_INVALID
)
1907 __clear_bit(v
->ptrs
[i
].dev
, devs
.d
);
1909 if (i
< h
->s
->nr_data
)
1915 BUG_ON(nr_have_data
> h
->s
->nr_data
);
1916 BUG_ON(nr_have_parity
> h
->s
->nr_parity
);
1919 if (nr_have_parity
< h
->s
->nr_parity
) {
1920 ret
= bch2_bucket_alloc_set_trans(trans
, &buckets
,
1930 open_bucket_for_each(c
, &buckets
, ob
, i
) {
1931 j
= find_next_zero_bit(h
->s
->blocks_gotten
,
1932 h
->s
->nr_data
+ h
->s
->nr_parity
,
1934 BUG_ON(j
>= h
->s
->nr_data
+ h
->s
->nr_parity
);
1936 h
->s
->blocks
[j
] = buckets
.v
[i
];
1937 v
->ptrs
[j
] = bch2_ob_ptr(c
, ob
);
1938 __set_bit(j
, h
->s
->blocks_gotten
);
1946 if (nr_have_data
< h
->s
->nr_data
) {
1947 ret
= bch2_bucket_alloc_set_trans(trans
, &buckets
,
1957 open_bucket_for_each(c
, &buckets
, ob
, i
) {
1958 j
= find_next_zero_bit(h
->s
->blocks_gotten
,
1960 BUG_ON(j
>= h
->s
->nr_data
);
1962 h
->s
->blocks
[j
] = buckets
.v
[i
];
1963 v
->ptrs
[j
] = bch2_ob_ptr(c
, ob
);
1964 __set_bit(j
, h
->s
->blocks_gotten
);
1974 static s64
get_existing_stripe(struct bch_fs
*c
,
1975 struct ec_stripe_head
*head
)
1977 ec_stripes_heap
*h
= &c
->ec_stripes_heap
;
1983 if (may_create_new_stripe(c
))
1986 mutex_lock(&c
->ec_stripes_heap_lock
);
1987 for (heap_idx
= 0; heap_idx
< h
->nr
; heap_idx
++) {
1988 /* No blocks worth reusing, stripe will just be deleted: */
1989 if (!h
->data
[heap_idx
].blocks_nonempty
)
1992 stripe_idx
= h
->data
[heap_idx
].idx
;
1994 m
= genradix_ptr(&c
->stripes
, stripe_idx
);
1996 if (m
->disk_label
== head
->disk_label
&&
1997 m
->algorithm
== head
->algo
&&
1998 m
->nr_redundant
== head
->redundancy
&&
1999 m
->sectors
== head
->blocksize
&&
2000 m
->blocks_nonempty
< m
->nr_blocks
- m
->nr_redundant
&&
2001 bch2_try_open_stripe(c
, head
->s
, stripe_idx
)) {
2006 mutex_unlock(&c
->ec_stripes_heap_lock
);
2010 static int __bch2_ec_stripe_head_reuse(struct btree_trans
*trans
, struct ec_stripe_head
*h
)
2012 struct bch_fs
*c
= trans
->c
;
2013 struct bch_stripe
*new_v
= &bkey_i_to_stripe(&h
->s
->new_stripe
.key
)->v
;
2014 struct bch_stripe
*existing_v
;
2020 * If we can't allocate a new stripe, and there's no stripes with empty
2021 * blocks for us to reuse, that means we have to wait on copygc:
2023 idx
= get_existing_stripe(c
, h
);
2025 return -BCH_ERR_stripe_alloc_blocked
;
2027 ret
= get_stripe_key_trans(trans
, idx
, &h
->s
->existing_stripe
);
2028 bch2_fs_fatal_err_on(ret
&& !bch2_err_matches(ret
, BCH_ERR_transaction_restart
), c
,
2029 "reading stripe key: %s", bch2_err_str(ret
));
2031 bch2_stripe_close(c
, h
->s
);
2035 existing_v
= &bkey_i_to_stripe(&h
->s
->existing_stripe
.key
)->v
;
2037 BUG_ON(existing_v
->nr_redundant
!= h
->s
->nr_parity
);
2038 h
->s
->nr_data
= existing_v
->nr_blocks
-
2039 existing_v
->nr_redundant
;
2041 ret
= ec_stripe_buf_init(&h
->s
->existing_stripe
, 0, h
->blocksize
);
2043 bch2_stripe_close(c
, h
->s
);
2047 BUG_ON(h
->s
->existing_stripe
.size
!= h
->blocksize
);
2048 BUG_ON(h
->s
->existing_stripe
.size
!= le16_to_cpu(existing_v
->sectors
));
2051 * Free buckets we initially allocated - they might conflict with
2052 * blocks from the stripe we're reusing:
2054 for_each_set_bit(i
, h
->s
->blocks_gotten
, new_v
->nr_blocks
) {
2055 bch2_open_bucket_put(c
, c
->open_buckets
+ h
->s
->blocks
[i
]);
2056 h
->s
->blocks
[i
] = 0;
2058 memset(h
->s
->blocks_gotten
, 0, sizeof(h
->s
->blocks_gotten
));
2059 memset(h
->s
->blocks_allocated
, 0, sizeof(h
->s
->blocks_allocated
));
2061 for (i
= 0; i
< existing_v
->nr_blocks
; i
++) {
2062 if (stripe_blockcount_get(existing_v
, i
)) {
2063 __set_bit(i
, h
->s
->blocks_gotten
);
2064 __set_bit(i
, h
->s
->blocks_allocated
);
2067 ec_block_io(c
, &h
->s
->existing_stripe
, READ
, i
, &h
->s
->iodone
);
2070 bkey_copy(&h
->s
->new_stripe
.key
, &h
->s
->existing_stripe
.key
);
2071 h
->s
->have_existing_stripe
= true;
2076 static int __bch2_ec_stripe_head_reserve(struct btree_trans
*trans
, struct ec_stripe_head
*h
)
2078 struct bch_fs
*c
= trans
->c
;
2079 struct btree_iter iter
;
2081 struct bpos min_pos
= POS(0, 1);
2082 struct bpos start_pos
= bpos_max(min_pos
, POS(0, c
->ec_stripe_hint
));
2085 if (!h
->s
->res
.sectors
) {
2086 ret
= bch2_disk_reservation_get(c
, &h
->s
->res
,
2089 BCH_DISK_RESERVATION_NOFAIL
);
2094 for_each_btree_key_norestart(trans
, iter
, BTREE_ID_stripes
, start_pos
,
2095 BTREE_ITER_slots
|BTREE_ITER_intent
, k
, ret
) {
2096 if (bkey_gt(k
.k
->p
, POS(0, U32_MAX
))) {
2097 if (start_pos
.offset
) {
2098 start_pos
= min_pos
;
2099 bch2_btree_iter_set_pos(&iter
, start_pos
);
2103 ret
= -BCH_ERR_ENOSPC_stripe_create
;
2107 if (bkey_deleted(k
.k
) &&
2108 bch2_try_open_stripe(c
, h
->s
, k
.k
->p
.offset
))
2112 c
->ec_stripe_hint
= iter
.pos
.offset
;
2117 ret
= ec_stripe_mem_alloc(trans
, &iter
);
2119 bch2_stripe_close(c
, h
->s
);
2123 h
->s
->new_stripe
.key
.k
.p
= iter
.pos
;
2125 bch2_trans_iter_exit(trans
, &iter
);
2128 bch2_disk_reservation_put(c
, &h
->s
->res
);
2132 struct ec_stripe_head
*bch2_ec_stripe_head_get(struct btree_trans
*trans
,
2135 unsigned redundancy
,
2136 enum bch_watermark watermark
,
2139 struct bch_fs
*c
= trans
->c
;
2140 struct ec_stripe_head
*h
;
2141 bool waiting
= false;
2142 unsigned disk_label
= 0;
2143 struct target t
= target_decode(target
);
2146 if (t
.type
== TARGET_GROUP
) {
2147 if (t
.group
> U8_MAX
) {
2148 bch_err(c
, "cannot create a stripe when disk_label > U8_MAX");
2151 disk_label
= t
.group
+ 1; /* 0 == no label */
2154 h
= __bch2_ec_stripe_head_get(trans
, disk_label
, algo
, redundancy
, watermark
);
2155 if (IS_ERR_OR_NULL(h
))
2159 ret
= ec_new_stripe_alloc(c
, h
);
2161 bch_err(c
, "failed to allocate new stripe");
2166 if (h
->s
->allocated
)
2169 if (h
->s
->have_existing_stripe
)
2170 goto alloc_existing
;
2172 /* First, try to allocate a full stripe: */
2173 ret
= new_stripe_alloc_buckets(trans
, h
, BCH_WATERMARK_stripe
, NULL
) ?:
2174 __bch2_ec_stripe_head_reserve(trans
, h
);
2177 if (bch2_err_matches(ret
, BCH_ERR_transaction_restart
) ||
2178 bch2_err_matches(ret
, ENOMEM
))
2182 * Not enough buckets available for a full stripe: we must reuse an
2186 ret
= __bch2_ec_stripe_head_reuse(trans
, h
);
2189 if (waiting
|| !cl
|| ret
!= -BCH_ERR_stripe_alloc_blocked
)
2192 if (watermark
== BCH_WATERMARK_copygc
) {
2193 ret
= new_stripe_alloc_buckets(trans
, h
, watermark
, NULL
) ?:
2194 __bch2_ec_stripe_head_reserve(trans
, h
);
2200 /* XXX freelist_wait? */
2201 closure_wait(&c
->freelist_wait
, cl
);
2206 closure_wake_up(&c
->freelist_wait
);
2209 * Retry allocating buckets, with the watermark for this
2212 ret
= new_stripe_alloc_buckets(trans
, h
, watermark
, cl
);
2217 ret
= ec_stripe_buf_init(&h
->s
->new_stripe
, 0, h
->blocksize
);
2221 h
->s
->allocated
= true;
2224 BUG_ON(!h
->s
->new_stripe
.data
[0]);
2225 BUG_ON(trans
->restarted
);
2228 bch2_ec_stripe_head_put(c
, h
);
2229 return ERR_PTR(ret
);
2232 /* device removal */
2234 static int bch2_invalidate_stripe_to_dev(struct btree_trans
*trans
, struct bkey_s_c k_a
)
2236 struct bch_alloc_v4 a_convert
;
2237 const struct bch_alloc_v4
*a
= bch2_alloc_to_v4(k_a
, &a_convert
);
2242 if (a
->stripe_sectors
) {
2243 bch_err(trans
->c
, "trying to invalidate device in stripe when bucket has stripe data");
2244 return -BCH_ERR_invalidate_stripe_to_dev
;
2247 struct btree_iter iter
;
2248 struct bkey_i_stripe
*s
=
2249 bch2_bkey_get_mut_typed(trans
, &iter
, BTREE_ID_stripes
, POS(0, a
->stripe
),
2250 BTREE_ITER_slots
, stripe
);
2251 int ret
= PTR_ERR_OR_ZERO(s
);
2255 struct disk_accounting_pos acc
= {
2256 .type
= BCH_DISK_ACCOUNTING_replicas
,
2260 for (unsigned i
= 0; i
< s
->v
.nr_blocks
; i
++)
2261 sectors
-= stripe_blockcount_get(&s
->v
, i
);
2263 bch2_bkey_to_replicas(&acc
.replicas
, bkey_i_to_s_c(&s
->k_i
));
2264 acc
.replicas
.data_type
= BCH_DATA_user
;
2265 ret
= bch2_disk_accounting_mod(trans
, &acc
, §ors
, 1, false);
2269 struct bkey_ptrs ptrs
= bch2_bkey_ptrs(bkey_i_to_s(&s
->k_i
));
2270 bkey_for_each_ptr(ptrs
, ptr
)
2271 if (ptr
->dev
== k_a
.k
->p
.inode
)
2272 ptr
->dev
= BCH_SB_MEMBER_INVALID
;
2276 bch2_bkey_to_replicas(&acc
.replicas
, bkey_i_to_s_c(&s
->k_i
));
2277 acc
.replicas
.data_type
= BCH_DATA_user
;
2278 ret
= bch2_disk_accounting_mod(trans
, &acc
, §ors
, 1, false);
2282 bch2_trans_iter_exit(trans
, &iter
);
2286 int bch2_dev_remove_stripes(struct bch_fs
*c
, unsigned dev_idx
)
2288 return bch2_trans_run(c
,
2289 for_each_btree_key_upto_commit(trans
, iter
,
2290 BTREE_ID_alloc
, POS(dev_idx
, 0), POS(dev_idx
, U64_MAX
),
2291 BTREE_ITER_intent
, k
,
2293 bch2_invalidate_stripe_to_dev(trans
, k
);
2297 /* startup/shutdown */
2299 static void __bch2_ec_stop(struct bch_fs
*c
, struct bch_dev
*ca
)
2301 struct ec_stripe_head
*h
;
2302 struct open_bucket
*ob
;
2305 mutex_lock(&c
->ec_stripe_head_lock
);
2306 list_for_each_entry(h
, &c
->ec_stripe_head_list
, list
) {
2307 mutex_lock(&h
->lock
);
2314 for (i
= 0; i
< bkey_i_to_stripe(&h
->s
->new_stripe
.key
)->v
.nr_blocks
; i
++) {
2315 if (!h
->s
->blocks
[i
])
2318 ob
= c
->open_buckets
+ h
->s
->blocks
[i
];
2319 if (ob
->dev
== ca
->dev_idx
)
2324 ec_stripe_new_cancel(c
, h
, -BCH_ERR_erofs_no_writes
);
2326 mutex_unlock(&h
->lock
);
2328 mutex_unlock(&c
->ec_stripe_head_lock
);
2331 void bch2_ec_stop_dev(struct bch_fs
*c
, struct bch_dev
*ca
)
2333 __bch2_ec_stop(c
, ca
);
2336 void bch2_fs_ec_stop(struct bch_fs
*c
)
2338 __bch2_ec_stop(c
, NULL
);
2341 static bool bch2_fs_ec_flush_done(struct bch_fs
*c
)
2345 mutex_lock(&c
->ec_stripe_new_lock
);
2346 ret
= list_empty(&c
->ec_stripe_new_list
);
2347 mutex_unlock(&c
->ec_stripe_new_lock
);
2352 void bch2_fs_ec_flush(struct bch_fs
*c
)
2354 wait_event(c
->ec_stripe_new_wait
, bch2_fs_ec_flush_done(c
));
2357 int bch2_stripes_read(struct bch_fs
*c
)
2359 int ret
= bch2_trans_run(c
,
2360 for_each_btree_key(trans
, iter
, BTREE_ID_stripes
, POS_MIN
,
2361 BTREE_ITER_prefetch
, k
, ({
2362 if (k
.k
->type
!= KEY_TYPE_stripe
)
2365 ret
= __ec_stripe_mem_alloc(c
, k
.k
->p
.offset
, GFP_KERNEL
);
2369 struct stripe
*m
= genradix_ptr(&c
->stripes
, k
.k
->p
.offset
);
2371 stripe_to_mem(m
, bkey_s_c_to_stripe(k
).v
);
2373 bch2_stripes_heap_insert(c
, m
, k
.k
->p
.offset
);
2380 void bch2_stripes_heap_to_text(struct printbuf
*out
, struct bch_fs
*c
)
2382 ec_stripes_heap
*h
= &c
->ec_stripes_heap
;
2386 mutex_lock(&c
->ec_stripes_heap_lock
);
2387 for (i
= 0; i
< min_t(size_t, h
->nr
, 50); i
++) {
2388 m
= genradix_ptr(&c
->stripes
, h
->data
[i
].idx
);
2390 prt_printf(out
, "%zu %u/%u+%u", h
->data
[i
].idx
,
2391 h
->data
[i
].blocks_nonempty
,
2392 m
->nr_blocks
- m
->nr_redundant
,
2394 if (bch2_stripe_is_open(c
, h
->data
[i
].idx
))
2395 prt_str(out
, " open");
2398 mutex_unlock(&c
->ec_stripes_heap_lock
);
2401 static void bch2_new_stripe_to_text(struct printbuf
*out
, struct bch_fs
*c
,
2402 struct ec_stripe_new
*s
)
2404 prt_printf(out
, "\tidx %llu blocks %u+%u allocated %u ref %u %u %s obs",
2405 s
->idx
, s
->nr_data
, s
->nr_parity
,
2406 bitmap_weight(s
->blocks_allocated
, s
->nr_data
),
2407 atomic_read(&s
->ref
[STRIPE_REF_io
]),
2408 atomic_read(&s
->ref
[STRIPE_REF_stripe
]),
2409 bch2_watermarks
[s
->h
->watermark
]);
2411 struct bch_stripe
*v
= &bkey_i_to_stripe(&s
->new_stripe
.key
)->v
;
2413 for_each_set_bit(i
, s
->blocks_gotten
, v
->nr_blocks
)
2414 prt_printf(out
, " %u", s
->blocks
[i
]);
2416 bch2_bkey_val_to_text(out
, c
, bkey_i_to_s_c(&s
->new_stripe
.key
));
2420 void bch2_new_stripes_to_text(struct printbuf
*out
, struct bch_fs
*c
)
2422 struct ec_stripe_head
*h
;
2423 struct ec_stripe_new
*s
;
2425 mutex_lock(&c
->ec_stripe_head_lock
);
2426 list_for_each_entry(h
, &c
->ec_stripe_head_list
, list
) {
2427 prt_printf(out
, "disk label %u algo %u redundancy %u %s nr created %llu:\n",
2428 h
->disk_label
, h
->algo
, h
->redundancy
,
2429 bch2_watermarks
[h
->watermark
],
2433 bch2_new_stripe_to_text(out
, c
, h
->s
);
2435 mutex_unlock(&c
->ec_stripe_head_lock
);
2437 prt_printf(out
, "in flight:\n");
2439 mutex_lock(&c
->ec_stripe_new_lock
);
2440 list_for_each_entry(s
, &c
->ec_stripe_new_list
, list
)
2441 bch2_new_stripe_to_text(out
, c
, s
);
2442 mutex_unlock(&c
->ec_stripe_new_lock
);
2445 void bch2_fs_ec_exit(struct bch_fs
*c
)
2447 struct ec_stripe_head
*h
;
2451 mutex_lock(&c
->ec_stripe_head_lock
);
2452 h
= list_first_entry_or_null(&c
->ec_stripe_head_list
,
2453 struct ec_stripe_head
, list
);
2456 mutex_unlock(&c
->ec_stripe_head_lock
);
2461 for (i
= 0; i
< bkey_i_to_stripe(&h
->s
->new_stripe
.key
)->v
.nr_blocks
; i
++)
2462 BUG_ON(h
->s
->blocks
[i
]);
2469 BUG_ON(!list_empty(&c
->ec_stripe_new_list
));
2471 free_heap(&c
->ec_stripes_heap
);
2472 genradix_free(&c
->stripes
);
2473 bioset_exit(&c
->ec_bioset
);
2476 void bch2_fs_ec_init_early(struct bch_fs
*c
)
2478 spin_lock_init(&c
->ec_stripes_new_lock
);
2479 mutex_init(&c
->ec_stripes_heap_lock
);
2481 INIT_LIST_HEAD(&c
->ec_stripe_head_list
);
2482 mutex_init(&c
->ec_stripe_head_lock
);
2484 INIT_LIST_HEAD(&c
->ec_stripe_new_list
);
2485 mutex_init(&c
->ec_stripe_new_lock
);
2486 init_waitqueue_head(&c
->ec_stripe_new_wait
);
2488 INIT_WORK(&c
->ec_stripe_create_work
, ec_stripe_create_work
);
2489 INIT_WORK(&c
->ec_stripe_delete_work
, ec_stripe_delete_work
);
2492 int bch2_fs_ec_init(struct bch_fs
*c
)
2494 return bioset_init(&c
->ec_bioset
, 1, offsetof(struct ec_bio
, bio
),