1 // SPDX-License-Identifier: GPL-2.0
4 #include "alloc_background.h"
5 #include "backpointers.h"
7 #include "btree_node_scan.h"
8 #include "disk_accounting.h"
14 #include "logged_ops.h"
15 #include "rebalance.h"
17 #include "recovery_passes.h"
19 #include "subvolume.h"
23 const char * const bch2_recovery_passes
[] = {
24 #define x(_fn, ...) #_fn,
/* Fake recovery pass, so that scan_for_btree_nodes isn't 0: */
static int bch2_recovery_pass_empty(struct bch_fs *c)
{
	return 0;
}
36 static int bch2_set_may_go_rw(struct bch_fs
*c
)
38 struct journal_keys
*keys
= &c
->journal_keys
;
41 * After we go RW, the journal keys buffer can't be modified (except for
42 * setting journal_key->overwritten: it will be accessed by multiple
45 move_gap(keys
, keys
->nr
);
47 set_bit(BCH_FS_may_go_rw
, &c
->flags
);
49 if (keys
->nr
|| c
->opts
.fsck
|| !c
->sb
.clean
|| c
->opts
.recovery_passes
)
50 return bch2_fs_read_write_early(c
);
/* Table entry for one recovery pass: its callback and PASS_* condition flags. */
struct recovery_pass_fn {
	int		(*fn)(struct bch_fs *);
	unsigned	when;
};
59 static struct recovery_pass_fn recovery_pass_fns
[] = {
60 #define x(_fn, _id, _when) { .fn = bch2_##_fn, .when = _when },
65 static const u8 passes_to_stable_map
[] = {
66 #define x(n, id, ...) [BCH_RECOVERY_PASS_##n] = BCH_RECOVERY_PASS_STABLE_##n,
71 static enum bch_recovery_pass_stable
bch2_recovery_pass_to_stable(enum bch_recovery_pass pass
)
73 return passes_to_stable_map
[pass
];
76 u64
bch2_recovery_passes_to_stable(u64 v
)
79 for (unsigned i
= 0; i
< ARRAY_SIZE(passes_to_stable_map
); i
++)
81 ret
|= BIT_ULL(passes_to_stable_map
[i
]);
85 u64
bch2_recovery_passes_from_stable(u64 v
)
87 static const u8 map
[] = {
88 #define x(n, id, ...) [BCH_RECOVERY_PASS_STABLE_##n] = BCH_RECOVERY_PASS_##n,
94 for (unsigned i
= 0; i
< ARRAY_SIZE(map
); i
++)
96 ret
|= BIT_ULL(map
[i
]);
101 * For when we need to rewind recovery passes and run a pass we skipped:
103 int bch2_run_explicit_recovery_pass(struct bch_fs
*c
,
104 enum bch_recovery_pass pass
)
106 if (c
->opts
.recovery_passes
& BIT_ULL(pass
))
109 bch_info(c
, "running explicit recovery pass %s (%u), currently at %s (%u)",
110 bch2_recovery_passes
[pass
], pass
,
111 bch2_recovery_passes
[c
->curr_recovery_pass
], c
->curr_recovery_pass
);
113 c
->opts
.recovery_passes
|= BIT_ULL(pass
);
115 if (c
->curr_recovery_pass
>= pass
) {
116 c
->curr_recovery_pass
= pass
;
117 c
->recovery_passes_complete
&= (1ULL << pass
) >> 1;
118 return -BCH_ERR_restart_recovery
;
124 int bch2_run_explicit_recovery_pass_persistent(struct bch_fs
*c
,
125 enum bch_recovery_pass pass
)
127 enum bch_recovery_pass_stable s
= bch2_recovery_pass_to_stable(pass
);
129 mutex_lock(&c
->sb_lock
);
130 struct bch_sb_field_ext
*ext
= bch2_sb_field_get(c
->disk_sb
.sb
, ext
);
132 if (!test_bit_le64(s
, ext
->recovery_passes_required
)) {
133 __set_bit_le64(s
, ext
->recovery_passes_required
);
136 mutex_unlock(&c
->sb_lock
);
138 return bch2_run_explicit_recovery_pass(c
, pass
);
141 static void bch2_clear_recovery_pass_required(struct bch_fs
*c
,
142 enum bch_recovery_pass pass
)
144 enum bch_recovery_pass_stable s
= bch2_recovery_pass_to_stable(pass
);
146 mutex_lock(&c
->sb_lock
);
147 struct bch_sb_field_ext
*ext
= bch2_sb_field_get(c
->disk_sb
.sb
, ext
);
149 if (test_bit_le64(s
, ext
->recovery_passes_required
)) {
150 __clear_bit_le64(s
, ext
->recovery_passes_required
);
153 mutex_unlock(&c
->sb_lock
);
156 u64
bch2_fsck_recovery_passes(void)
160 for (unsigned i
= 0; i
< ARRAY_SIZE(recovery_pass_fns
); i
++)
161 if (recovery_pass_fns
[i
].when
& PASS_FSCK
)
166 static bool should_run_recovery_pass(struct bch_fs
*c
, enum bch_recovery_pass pass
)
168 struct recovery_pass_fn
*p
= recovery_pass_fns
+ pass
;
170 if (c
->opts
.recovery_passes_exclude
& BIT_ULL(pass
))
172 if (c
->opts
.recovery_passes
& BIT_ULL(pass
))
174 if ((p
->when
& PASS_FSCK
) && c
->opts
.fsck
)
176 if ((p
->when
& PASS_UNCLEAN
) && !c
->sb
.clean
)
178 if (p
->when
& PASS_ALWAYS
)
183 static int bch2_run_recovery_pass(struct bch_fs
*c
, enum bch_recovery_pass pass
)
185 struct recovery_pass_fn
*p
= recovery_pass_fns
+ pass
;
188 if (!(p
->when
& PASS_SILENT
))
189 bch2_print(c
, KERN_INFO
bch2_log_msg(c
, "%s..."),
190 bch2_recovery_passes
[pass
]);
194 if (!(p
->when
& PASS_SILENT
))
195 bch2_print(c
, KERN_CONT
" done\n");
200 int bch2_run_online_recovery_passes(struct bch_fs
*c
)
204 down_read(&c
->state_lock
);
206 for (unsigned i
= 0; i
< ARRAY_SIZE(recovery_pass_fns
); i
++) {
207 struct recovery_pass_fn
*p
= recovery_pass_fns
+ i
;
209 if (!(p
->when
& PASS_ONLINE
))
212 ret
= bch2_run_recovery_pass(c
, i
);
213 if (bch2_err_matches(ret
, BCH_ERR_restart_recovery
)) {
214 i
= c
->curr_recovery_pass
;
221 up_read(&c
->state_lock
);
/*
 * bch2_run_recovery_passes(): main offline recovery driver; advances
 * c->curr_recovery_pass through the pass table, running each pass that
 * should_run_recovery_pass() selects.
 * NOTE(review): this definition continues past the end of the visible chunk;
 * the loop/function close and final return are not shown here.
 */
226 int bch2_run_recovery_passes(struct bch_fs
*c
)
231 * We can't allow set_may_go_rw to be excluded; that would cause us to
232 * use the journal replay keys for updates where it's not expected.
234 c
->opts
.recovery_passes_exclude
&= ~BCH_RECOVERY_PASS_set_may_go_rw
;
/* Walk the pass table from wherever recovery currently stands: */
236 while (c
->curr_recovery_pass
< ARRAY_SIZE(recovery_pass_fns
)) {
/* Honor recovery_pass_last: stop once we've gone past it. */
237 if (c
->opts
.recovery_pass_last
&&
238 c
->curr_recovery_pass
> c
->opts
.recovery_pass_last
)
241 if (should_run_recovery_pass(c
, c
->curr_recovery_pass
)) {
/* Remember which pass we started, to detect a rewind below. */
242 unsigned pass
= c
->curr_recovery_pass
;
/* Run the pass, then flush the journal if it succeeded. */
244 ret
= bch2_run_recovery_pass(c
, c
->curr_recovery_pass
) ?:
245 bch2_journal_flush(&c
->journal
);
/*
 * Restart the loop if the pass requested a recovery restart, or if
 * it errored after rewinding curr_recovery_pass behind where we were.
 */
246 if (bch2_err_matches(ret
, BCH_ERR_restart_recovery
) ||
247 (ret
&& c
->curr_recovery_pass
< pass
))
252 c
->recovery_passes_complete
|= BIT_ULL(c
->curr_recovery_pass
);
255 c
->recovery_pass_done
= max(c
->recovery_pass_done
, c
->curr_recovery_pass
);
/* Only drop the persistent "required" bit if no errors were seen: */
257 if (!test_bit(BCH_FS_error
, &c
->flags
))
258 bch2_clear_recovery_pass_required(c
, c
->curr_recovery_pass
);
260 c
->curr_recovery_pass
++;