1 // SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2018-2023 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_bit.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "scrub/scrub.h"
#include "scrub/bitmap.h"

#include <linux/interval_tree_generic.h>
21 struct xbitmap64_node
{
22 struct rb_node bn_rbnode
;
24 /* First set bit of this interval and subtree. */
27 /* Last set bit of this interval. */
30 /* Last set bit of this subtree. Do not touch this. */
31 uint64_t __bn_subtree_last
;
34 /* Define our own interval tree type with uint64_t parameters. */
36 #define START(node) ((node)->bn_start)
37 #define LAST(node) ((node)->bn_last)
40 * These functions are defined by the INTERVAL_TREE_DEFINE macro, but we'll
41 * forward-declare them anyway for clarity.
43 static inline __maybe_unused
void
44 xbitmap64_tree_insert(struct xbitmap64_node
*node
, struct rb_root_cached
*root
);
46 static inline __maybe_unused
void
47 xbitmap64_tree_remove(struct xbitmap64_node
*node
, struct rb_root_cached
*root
);
49 static inline __maybe_unused
struct xbitmap64_node
*
50 xbitmap64_tree_iter_first(struct rb_root_cached
*root
, uint64_t start
,
53 static inline __maybe_unused
struct xbitmap64_node
*
54 xbitmap64_tree_iter_next(struct xbitmap64_node
*node
, uint64_t start
,
57 INTERVAL_TREE_DEFINE(struct xbitmap64_node
, bn_rbnode
, uint64_t,
58 __bn_subtree_last
, START
, LAST
, static inline __maybe_unused
,
/* Iterate each interval of a bitmap.  Do not change the bitmap. */
#define for_each_xbitmap64_extent(bn, bitmap) \
	for ((bn) = rb_entry_safe(rb_first(&(bitmap)->xb_root.rb_root), \
				   struct xbitmap64_node, bn_rbnode); \
	     (bn) != NULL; \
	     (bn) = rb_entry_safe(rb_next(&(bn)->bn_rbnode), \
				   struct xbitmap64_node, bn_rbnode))
69 /* Clear a range of this bitmap. */
72 struct xbitmap64
*bitmap
,
76 struct xbitmap64_node
*bn
;
77 struct xbitmap64_node
*new_bn
;
78 uint64_t last
= start
+ len
- 1;
80 while ((bn
= xbitmap64_tree_iter_first(&bitmap
->xb_root
, start
, last
))) {
81 if (bn
->bn_start
< start
&& bn
->bn_last
> last
) {
82 uint64_t old_last
= bn
->bn_last
;
84 /* overlaps with the entire clearing range */
85 xbitmap64_tree_remove(bn
, &bitmap
->xb_root
);
86 bn
->bn_last
= start
- 1;
87 xbitmap64_tree_insert(bn
, &bitmap
->xb_root
);
90 new_bn
= kmalloc(sizeof(struct xbitmap64_node
),
94 new_bn
->bn_start
= last
+ 1;
95 new_bn
->bn_last
= old_last
;
96 xbitmap64_tree_insert(new_bn
, &bitmap
->xb_root
);
97 } else if (bn
->bn_start
< start
) {
98 /* overlaps with the left side of the clearing range */
99 xbitmap64_tree_remove(bn
, &bitmap
->xb_root
);
100 bn
->bn_last
= start
- 1;
101 xbitmap64_tree_insert(bn
, &bitmap
->xb_root
);
102 } else if (bn
->bn_last
> last
) {
103 /* overlaps with the right side of the clearing range */
104 xbitmap64_tree_remove(bn
, &bitmap
->xb_root
);
105 bn
->bn_start
= last
+ 1;
106 xbitmap64_tree_insert(bn
, &bitmap
->xb_root
);
109 /* in the middle of the clearing range */
110 xbitmap64_tree_remove(bn
, &bitmap
->xb_root
);
118 /* Set a range of this bitmap. */
121 struct xbitmap64
*bitmap
,
125 struct xbitmap64_node
*left
;
126 struct xbitmap64_node
*right
;
127 uint64_t last
= start
+ len
- 1;
130 /* Is this whole range already set? */
131 left
= xbitmap64_tree_iter_first(&bitmap
->xb_root
, start
, last
);
132 if (left
&& left
->bn_start
<= start
&& left
->bn_last
>= last
)
135 /* Clear out everything in the range we want to set. */
136 error
= xbitmap64_clear(bitmap
, start
, len
);
140 /* Do we have a left-adjacent extent? */
141 left
= xbitmap64_tree_iter_first(&bitmap
->xb_root
, start
- 1, start
- 1);
142 ASSERT(!left
|| left
->bn_last
+ 1 == start
);
144 /* Do we have a right-adjacent extent? */
145 right
= xbitmap64_tree_iter_first(&bitmap
->xb_root
, last
+ 1, last
+ 1);
146 ASSERT(!right
|| right
->bn_start
== last
+ 1);
149 /* combine left and right adjacent extent */
150 xbitmap64_tree_remove(left
, &bitmap
->xb_root
);
151 xbitmap64_tree_remove(right
, &bitmap
->xb_root
);
152 left
->bn_last
= right
->bn_last
;
153 xbitmap64_tree_insert(left
, &bitmap
->xb_root
);
156 /* combine with left extent */
157 xbitmap64_tree_remove(left
, &bitmap
->xb_root
);
158 left
->bn_last
= last
;
159 xbitmap64_tree_insert(left
, &bitmap
->xb_root
);
161 /* combine with right extent */
162 xbitmap64_tree_remove(right
, &bitmap
->xb_root
);
163 right
->bn_start
= start
;
164 xbitmap64_tree_insert(right
, &bitmap
->xb_root
);
167 left
= kmalloc(sizeof(struct xbitmap64_node
), XCHK_GFP_FLAGS
);
170 left
->bn_start
= start
;
171 left
->bn_last
= last
;
172 xbitmap64_tree_insert(left
, &bitmap
->xb_root
);
178 /* Free everything related to this bitmap. */
181 struct xbitmap64
*bitmap
)
183 struct xbitmap64_node
*bn
;
185 while ((bn
= xbitmap64_tree_iter_first(&bitmap
->xb_root
, 0, -1ULL))) {
186 xbitmap64_tree_remove(bn
, &bitmap
->xb_root
);
191 /* Set up a per-AG block bitmap. */
194 struct xbitmap64
*bitmap
)
196 bitmap
->xb_root
= RB_ROOT_CACHED
;
200 * Remove all the blocks mentioned in @sub from the extents in @bitmap.
202 * The intent is that callers will iterate the rmapbt for all of its records
203 * for a given owner to generate @bitmap; and iterate all the blocks of the
204 * metadata structures that are not being rebuilt and have the same rmapbt
205 * owner to generate @sub. This routine subtracts all the extents
206 * mentioned in sub from all the extents linked in @bitmap, which leaves
207 * @bitmap as the list of blocks that are not accounted for, which we assume
208 * are the dead blocks of the old metadata structure. The blocks mentioned in
209 * @bitmap can be reaped.
211 * This is the logical equivalent of bitmap &= ~sub.
215 struct xbitmap64
*bitmap
,
216 struct xbitmap64
*sub
)
218 struct xbitmap64_node
*bn
;
221 if (xbitmap64_empty(bitmap
) || xbitmap64_empty(sub
))
224 for_each_xbitmap64_extent(bn
, sub
) {
225 error
= xbitmap64_clear(bitmap
, bn
->bn_start
,
226 bn
->bn_last
- bn
->bn_start
+ 1);
234 /* How many bits are set in this bitmap? */
237 struct xbitmap64
*bitmap
)
239 struct xbitmap64_node
*bn
;
242 for_each_xbitmap64_extent(bn
, bitmap
)
243 ret
+= bn
->bn_last
- bn
->bn_start
+ 1;
248 /* Call a function for every run of set bits in this bitmap. */
251 struct xbitmap64
*bitmap
,
252 xbitmap64_walk_fn fn
,
255 struct xbitmap64_node
*bn
;
258 for_each_xbitmap64_extent(bn
, bitmap
) {
259 error
= fn(bn
->bn_start
, bn
->bn_last
- bn
->bn_start
+ 1, priv
);
267 /* Does this bitmap have no bits set at all? */
270 struct xbitmap64
*bitmap
)
272 return bitmap
->xb_root
.rb_root
.rb_node
== NULL
;
275 /* Is the start of the range set or clear? And for how long? */
278 struct xbitmap64
*bitmap
,
282 struct xbitmap64_node
*bn
;
283 uint64_t last
= start
+ *len
- 1;
285 bn
= xbitmap64_tree_iter_first(&bitmap
->xb_root
, start
, last
);
288 if (bn
->bn_start
<= start
) {
289 if (bn
->bn_last
< last
)
290 *len
= bn
->bn_last
- start
+ 1;
293 *len
= bn
->bn_start
- start
;
299 struct xbitmap32_node
{
300 struct rb_node bn_rbnode
;
302 /* First set bit of this interval and subtree. */
305 /* Last set bit of this interval. */
308 /* Last set bit of this subtree. Do not touch this. */
309 uint32_t __bn_subtree_last
;
312 /* Define our own interval tree type with uint32_t parameters. */
315 * These functions are defined by the INTERVAL_TREE_DEFINE macro, but we'll
316 * forward-declare them anyway for clarity.
318 static inline __maybe_unused
void
319 xbitmap32_tree_insert(struct xbitmap32_node
*node
, struct rb_root_cached
*root
);
321 static inline __maybe_unused
void
322 xbitmap32_tree_remove(struct xbitmap32_node
*node
, struct rb_root_cached
*root
);
324 static inline __maybe_unused
struct xbitmap32_node
*
325 xbitmap32_tree_iter_first(struct rb_root_cached
*root
, uint32_t start
,
328 static inline __maybe_unused
struct xbitmap32_node
*
329 xbitmap32_tree_iter_next(struct xbitmap32_node
*node
, uint32_t start
,
332 INTERVAL_TREE_DEFINE(struct xbitmap32_node
, bn_rbnode
, uint32_t,
333 __bn_subtree_last
, START
, LAST
, static inline __maybe_unused
,
/* Iterate each interval of a bitmap.  Do not change the bitmap. */
#define for_each_xbitmap32_extent(bn, bitmap) \
	for ((bn) = rb_entry_safe(rb_first(&(bitmap)->xb_root.rb_root), \
				   struct xbitmap32_node, bn_rbnode); \
	     (bn) != NULL; \
	     (bn) = rb_entry_safe(rb_next(&(bn)->bn_rbnode), \
				   struct xbitmap32_node, bn_rbnode))
344 /* Clear a range of this bitmap. */
347 struct xbitmap32
*bitmap
,
351 struct xbitmap32_node
*bn
;
352 struct xbitmap32_node
*new_bn
;
353 uint32_t last
= start
+ len
- 1;
355 while ((bn
= xbitmap32_tree_iter_first(&bitmap
->xb_root
, start
, last
))) {
356 if (bn
->bn_start
< start
&& bn
->bn_last
> last
) {
357 uint32_t old_last
= bn
->bn_last
;
359 /* overlaps with the entire clearing range */
360 xbitmap32_tree_remove(bn
, &bitmap
->xb_root
);
361 bn
->bn_last
= start
- 1;
362 xbitmap32_tree_insert(bn
, &bitmap
->xb_root
);
365 new_bn
= kmalloc(sizeof(struct xbitmap32_node
),
369 new_bn
->bn_start
= last
+ 1;
370 new_bn
->bn_last
= old_last
;
371 xbitmap32_tree_insert(new_bn
, &bitmap
->xb_root
);
372 } else if (bn
->bn_start
< start
) {
373 /* overlaps with the left side of the clearing range */
374 xbitmap32_tree_remove(bn
, &bitmap
->xb_root
);
375 bn
->bn_last
= start
- 1;
376 xbitmap32_tree_insert(bn
, &bitmap
->xb_root
);
377 } else if (bn
->bn_last
> last
) {
378 /* overlaps with the right side of the clearing range */
379 xbitmap32_tree_remove(bn
, &bitmap
->xb_root
);
380 bn
->bn_start
= last
+ 1;
381 xbitmap32_tree_insert(bn
, &bitmap
->xb_root
);
384 /* in the middle of the clearing range */
385 xbitmap32_tree_remove(bn
, &bitmap
->xb_root
);
393 /* Set a range of this bitmap. */
396 struct xbitmap32
*bitmap
,
400 struct xbitmap32_node
*left
;
401 struct xbitmap32_node
*right
;
402 uint32_t last
= start
+ len
- 1;
405 /* Is this whole range already set? */
406 left
= xbitmap32_tree_iter_first(&bitmap
->xb_root
, start
, last
);
407 if (left
&& left
->bn_start
<= start
&& left
->bn_last
>= last
)
410 /* Clear out everything in the range we want to set. */
411 error
= xbitmap32_clear(bitmap
, start
, len
);
415 /* Do we have a left-adjacent extent? */
416 left
= xbitmap32_tree_iter_first(&bitmap
->xb_root
, start
- 1, start
- 1);
417 ASSERT(!left
|| left
->bn_last
+ 1 == start
);
419 /* Do we have a right-adjacent extent? */
420 right
= xbitmap32_tree_iter_first(&bitmap
->xb_root
, last
+ 1, last
+ 1);
421 ASSERT(!right
|| right
->bn_start
== last
+ 1);
424 /* combine left and right adjacent extent */
425 xbitmap32_tree_remove(left
, &bitmap
->xb_root
);
426 xbitmap32_tree_remove(right
, &bitmap
->xb_root
);
427 left
->bn_last
= right
->bn_last
;
428 xbitmap32_tree_insert(left
, &bitmap
->xb_root
);
431 /* combine with left extent */
432 xbitmap32_tree_remove(left
, &bitmap
->xb_root
);
433 left
->bn_last
= last
;
434 xbitmap32_tree_insert(left
, &bitmap
->xb_root
);
436 /* combine with right extent */
437 xbitmap32_tree_remove(right
, &bitmap
->xb_root
);
438 right
->bn_start
= start
;
439 xbitmap32_tree_insert(right
, &bitmap
->xb_root
);
442 left
= kmalloc(sizeof(struct xbitmap32_node
), XCHK_GFP_FLAGS
);
445 left
->bn_start
= start
;
446 left
->bn_last
= last
;
447 xbitmap32_tree_insert(left
, &bitmap
->xb_root
);
453 /* Free everything related to this bitmap. */
456 struct xbitmap32
*bitmap
)
458 struct xbitmap32_node
*bn
;
460 while ((bn
= xbitmap32_tree_iter_first(&bitmap
->xb_root
, 0, -1U))) {
461 xbitmap32_tree_remove(bn
, &bitmap
->xb_root
);
466 /* Set up a per-AG block bitmap. */
469 struct xbitmap32
*bitmap
)
471 bitmap
->xb_root
= RB_ROOT_CACHED
;
475 * Remove all the blocks mentioned in @sub from the extents in @bitmap.
477 * The intent is that callers will iterate the rmapbt for all of its records
478 * for a given owner to generate @bitmap; and iterate all the blocks of the
479 * metadata structures that are not being rebuilt and have the same rmapbt
480 * owner to generate @sub. This routine subtracts all the extents
481 * mentioned in sub from all the extents linked in @bitmap, which leaves
482 * @bitmap as the list of blocks that are not accounted for, which we assume
483 * are the dead blocks of the old metadata structure. The blocks mentioned in
484 * @bitmap can be reaped.
486 * This is the logical equivalent of bitmap &= ~sub.
490 struct xbitmap32
*bitmap
,
491 struct xbitmap32
*sub
)
493 struct xbitmap32_node
*bn
;
496 if (xbitmap32_empty(bitmap
) || xbitmap32_empty(sub
))
499 for_each_xbitmap32_extent(bn
, sub
) {
500 error
= xbitmap32_clear(bitmap
, bn
->bn_start
,
501 bn
->bn_last
- bn
->bn_start
+ 1);
509 /* How many bits are set in this bitmap? */
512 struct xbitmap32
*bitmap
)
514 struct xbitmap32_node
*bn
;
517 for_each_xbitmap32_extent(bn
, bitmap
)
518 ret
+= bn
->bn_last
- bn
->bn_start
+ 1;
523 /* Call a function for every run of set bits in this bitmap. */
526 struct xbitmap32
*bitmap
,
527 xbitmap32_walk_fn fn
,
530 struct xbitmap32_node
*bn
;
533 for_each_xbitmap32_extent(bn
, bitmap
) {
534 error
= fn(bn
->bn_start
, bn
->bn_last
- bn
->bn_start
+ 1, priv
);
542 /* Does this bitmap have no bits set at all? */
545 struct xbitmap32
*bitmap
)
547 return bitmap
->xb_root
.rb_root
.rb_node
== NULL
;
550 /* Is the start of the range set or clear? And for how long? */
553 struct xbitmap32
*bitmap
,
557 struct xbitmap32_node
*bn
;
558 uint32_t last
= start
+ *len
- 1;
560 bn
= xbitmap32_tree_iter_first(&bitmap
->xb_root
, start
, last
);
563 if (bn
->bn_start
<= start
) {
564 if (bn
->bn_last
< last
)
565 *len
= bn
->bn_last
- start
+ 1;
568 *len
= bn
->bn_start
- start
;
572 /* Count the number of set regions in this bitmap. */
574 xbitmap32_count_set_regions(
575 struct xbitmap32
*bitmap
)
577 struct xbitmap32_node
*bn
;
580 for_each_xbitmap32_extent(bn
, bitmap
)