// SPDX-License-Identifier: GPL-2.0

#include <linux/slab.h>
#include "messages.h"
#include "ctree.h"
#include "subpage.h"
#include "btrfs_inode.h"
/*
 * Subpage (sectorsize < PAGE_SIZE) support overview:
 *
 * Limitations:
 *
 * - Only support 64K page size for now
 *   This is to make metadata handling easier, as a 64K page would ensure
 *   all nodesizes fit inside one page, thus we don't need to handle
 *   cases where a tree block crosses several pages.
 *
 * - Only metadata read-write for now
 *   The data read-write part is in development.
 *
 * - Metadata can't cross 64K page boundary
 *   btrfs-progs and kernel have done that for a while, thus only ancient
 *   filesystems could have such problem. For such case, do a graceful
 *   rejection.
 *
 * Special behavior:
 *
 * - Metadata
 *   Metadata read is fully supported.
 *   Meaning when reading one tree block, we will only trigger the read for
 *   the needed range; other unrelated ranges in the same page will not be
 *   touched.
 *
 *   Metadata write support is partial.
 *   The writeback is still for the full page, but we will only submit
 *   the dirty extent buffers in the page.
 *
 *   This means, if we have a metadata page like this:
 *
 *   Page offset
 *   0         16K         32K         48K        64K
 *   |/////////|           |///////////|
 *        \- Tree block A        \- Tree block B
 *
 *   Even if we just want to writeback tree block A, we will also writeback
 *   tree block B if it's also dirty.
 *
 *   This may cause extra metadata writeback, which results in more COW.
 *
 * Implementation:
 *
 * - Common
 *   Both metadata and data will use a new structure, btrfs_subpage, to
 *   record the status of each sector inside a page. This provides the extra
 *   granularity needed.
 *
 * - Metadata
 *   Since we have multiple tree blocks inside one page, we can't rely on page
 *   locking anymore, or we would have greatly reduced concurrency or even
 *   deadlocks (hold one tree lock while trying to lock another tree lock in
 *   the same page).
 *
 *   Thus for metadata locking, subpage support relies on io_tree locking only.
 *   This means a slightly higher tree locking latency.
 */
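
/*
 * A rough sketch of the btrfs_subpage structure described above, simplified
 * from its declaration in subpage.h (see the header for the authoritative
 * definition):
 *
 *	struct btrfs_subpage {
 *		spinlock_t lock;
 *		union {
 *			atomic_t eb_refs;	(metadata folios)
 *			atomic_t nr_locked;	(data folios)
 *		};
 *		unsigned long bitmaps[];
 *	};
 *
 * The flexible bitmaps[] array holds one region per status type (uptodate,
 * dirty, writeback, ordered, checked, locked), each region being
 * fs_info->sectors_per_page bits long.
 */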
bool btrfs_is_subpage(const struct btrfs_fs_info *fs_info, struct address_space *mapping)
{
	if (fs_info->sectorsize >= PAGE_SIZE)
		return false;

	/*
	 * Only data pages (either through DIO or compression) can have no
	 * mapping. And if page->mapping->host is a data inode, it's subpage,
	 * as we have ruled out the sectorsize >= PAGE_SIZE case already.
	 */
	if (!mapping || !mapping->host || is_data_inode(BTRFS_I(mapping->host)))
		return true;

	/*
	 * Now the only remaining case is metadata, which goes through the
	 * subpage routine only if nodesize < PAGE_SIZE.
	 */
	if (fs_info->nodesize < PAGE_SIZE)
		return true;
	return false;
}
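
/*
 * An illustrative walk through the checks above (assumed numbers): on a
 * 64K-page machine with sectorsize == 4K and nodesize == 16K, a data mapping
 * (or no mapping at all) returns true at the second check, and a metadata
 * mapping returns true at the third since 16K < 64K. On a 4K-page machine
 * with sectorsize == 4K, the first check returns false and none of the
 * subpage machinery is used.
 */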
int btrfs_attach_subpage(const struct btrfs_fs_info *fs_info,
			 struct folio *folio, enum btrfs_subpage_type type)
{
	struct btrfs_subpage *subpage;

	/*
	 * We have cases like a dummy extent buffer page, which is not mapped
	 * and doesn't need to be locked.
	 */
	if (folio->mapping)
		ASSERT(folio_test_locked(folio));

	/* Either not subpage, or the folio already has private attached. */
	if (!btrfs_is_subpage(fs_info, folio->mapping) || folio_test_private(folio))
		return 0;

	subpage = btrfs_alloc_subpage(fs_info, type);
	if (IS_ERR(subpage))
		return PTR_ERR(subpage);

	folio_attach_private(folio, subpage);
	return 0;
}
void btrfs_detach_subpage(const struct btrfs_fs_info *fs_info, struct folio *folio)
{
	struct btrfs_subpage *subpage;

	/* Either not subpage, or the folio has no private attached. */
	if (!btrfs_is_subpage(fs_info, folio->mapping) || !folio_test_private(folio))
		return;

	subpage = folio_detach_private(folio);
	ASSERT(subpage);
	btrfs_free_subpage(subpage);
}
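
/*
 * A sketch of the expected calling pattern (hypothetical caller, shown only
 * for illustration): attach when a folio starts tracking subpage state,
 * detach before the folio is released.
 *
 *	ret = btrfs_attach_subpage(fs_info, folio, BTRFS_SUBPAGE_METADATA);
 *	if (ret < 0)
 *		return ret;
 *	...
 *	btrfs_detach_subpage(fs_info, folio);
 *
 * Both calls are no-ops for non-subpage filesystems, so callers don't need
 * their own btrfs_is_subpage() checks around them.
 */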
struct btrfs_subpage *btrfs_alloc_subpage(const struct btrfs_fs_info *fs_info,
					  enum btrfs_subpage_type type)
{
	struct btrfs_subpage *ret;
	unsigned int real_size;

	ASSERT(fs_info->sectorsize < PAGE_SIZE);

	real_size = struct_size(ret, bitmaps,
			BITS_TO_LONGS(btrfs_bitmap_nr_max * fs_info->sectors_per_page));
	ret = kzalloc(real_size, GFP_NOFS);
	if (!ret)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&ret->lock);
	if (type == BTRFS_SUBPAGE_METADATA)
		atomic_set(&ret->eb_refs, 0);
	else
		atomic_set(&ret->nr_locked, 0);
	return ret;
}

void btrfs_free_subpage(struct btrfs_subpage *subpage)
{
	kfree(subpage);
}
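
/*
 * A worked allocation size, with assumed numbers: for 4K sectors in a 64K
 * page, sectors_per_page == 16. Assuming btrfs_bitmap_nr_max counts six
 * bitmap types (uptodate, dirty, writeback, ordered, checked, locked):
 *
 *	BITS_TO_LONGS(6 * 16) = BITS_TO_LONGS(96) = 2 longs on 64-bit
 *
 * so real_size is sizeof(struct btrfs_subpage) plus 16 bytes of bitmaps.
 */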
/*
 * Increase the eb_refs of the current subpage.
 *
 * This is important for eb allocation, to prevent a race with the last eb
 * freeing of the same page.
 * With the eb_refs increased before the eb is inserted into the radix tree,
 * detach_extent_buffer_page() won't detach the folio private while we're still
 * allocating the extent buffer.
 */
void btrfs_folio_inc_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio)
{
	struct btrfs_subpage *subpage;

	if (!btrfs_is_subpage(fs_info, folio->mapping))
		return;

	ASSERT(folio_test_private(folio) && folio->mapping);
	lockdep_assert_held(&folio->mapping->i_private_lock);

	subpage = folio_get_private(folio);
	atomic_inc(&subpage->eb_refs);
}
void btrfs_folio_dec_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio)
{
	struct btrfs_subpage *subpage;

	if (!btrfs_is_subpage(fs_info, folio->mapping))
		return;

	ASSERT(folio_test_private(folio) && folio->mapping);
	lockdep_assert_held(&folio->mapping->i_private_lock);

	subpage = folio_get_private(folio);
	ASSERT(atomic_read(&subpage->eb_refs));
	atomic_dec(&subpage->eb_refs);
}
static void btrfs_subpage_assert(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, u64 start, u32 len)
{
	/* For subpage support, the folio must be a single page. */
	ASSERT(folio_order(folio) == 0);

	/* Basic checks */
	ASSERT(folio_test_private(folio) && folio_get_private(folio));
	ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
	       IS_ALIGNED(len, fs_info->sectorsize));
	/*
	 * The range check only works for mapped pages; we can still have
	 * unmapped pages like dummy extent buffer pages.
	 */
	if (folio->mapping)
		ASSERT(folio_pos(folio) <= start &&
		       start + len <= folio_pos(folio) + PAGE_SIZE);
}
#define subpage_calc_start_bit(fs_info, folio, name, start, len)	\
({									\
	unsigned int __start_bit;					\
									\
	btrfs_subpage_assert(fs_info, folio, start, len);		\
	__start_bit = offset_in_page(start) >> fs_info->sectorsize_bits; \
	__start_bit += fs_info->sectors_per_page * btrfs_bitmap_nr_##name; \
	__start_bit;							\
})
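
/*
 * A worked example of the bit calculation above (assumed numbers): with a
 * 64K page and 4K sectors, sectors_per_page == 16 and sectorsize_bits == 12.
 * For the dirty bitmap with start == folio_pos(folio) + 8K and len == 8K:
 *
 *	__start_bit = offset_in_page(8K) >> 12 = 2;
 *	__start_bit += 16 * btrfs_bitmap_nr_dirty;
 *
 * i.e. the operation covers two bits (sectors 2 and 3) inside the dirty
 * region of subpage->bitmaps.
 */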
static void btrfs_subpage_clamp_range(struct folio *folio, u64 *start, u32 *len)
{
	u64 orig_start = *start;
	u32 orig_len = *len;

	*start = max_t(u64, folio_pos(folio), orig_start);
	/*
	 * For certain call sites like btrfs_drop_pages(), we may have pages
	 * beyond the target range. In that case, just set @len to 0; subpage
	 * helpers can handle @len == 0 without any problem.
	 */
	if (folio_pos(folio) >= orig_start + orig_len)
		*len = 0;
	else
		*len = min_t(u64, folio_pos(folio) + PAGE_SIZE,
			     orig_start + orig_len) - *start;
}
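
/*
 * A worked clamp, with assumed numbers: for a folio at file offset 64K
 * (64K page) and a target range start == 60K, len == 16K:
 *
 *	*start = max(64K, 60K) = 64K
 *	*len   = min(64K + 64K, 60K + 16K) - 64K = 12K
 *
 * so only the 12K of the range that actually overlaps this folio is kept.
 */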
static bool btrfs_subpage_end_and_test_lock(const struct btrfs_fs_info *fs_info,
					    struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	const int start_bit = subpage_calc_start_bit(fs_info, folio, locked, start, len);
	const int nbits = (len >> fs_info->sectorsize_bits);
	unsigned long flags;
	unsigned int cleared = 0;
	int bit = start_bit;
	bool last;

	btrfs_subpage_assert(fs_info, folio, start, len);

	spin_lock_irqsave(&subpage->lock, flags);
	/*
	 * We have call sites passing @locked_page into
	 * extent_clear_unlock_delalloc() for the compression path.
	 *
	 * This @locked_page is locked by plain lock_page(), thus its
	 * subpage::locked is 0. Handle them in a special way.
	 */
	if (atomic_read(&subpage->nr_locked) == 0) {
		spin_unlock_irqrestore(&subpage->lock, flags);
		return true;
	}

	for_each_set_bit_from(bit, subpage->bitmaps, start_bit + nbits) {
		clear_bit(bit, subpage->bitmaps);
		cleared++;
	}
	ASSERT(atomic_read(&subpage->nr_locked) >= cleared);
	last = atomic_sub_and_test(cleared, &subpage->nr_locked);
	spin_unlock_irqrestore(&subpage->lock, flags);
	return last;
}
/*
 * Handle different locked folios:
 *
 * - Non-subpage folio
 *   Just unlock it.
 *
 * - folio locked but without any subpage range locked
 *   This happens either before writepage_delalloc(), or when the delalloc
 *   range is already handled by a previous folio.
 *   We can simply unlock it.
 *
 * - folio locked with subpage range locked
 *   We go through the locked sectors inside the range, clear their locked
 *   bitmap, reduce the writer lock number, and unlock the page if that's
 *   the last locked range.
 */
void btrfs_folio_end_lock(const struct btrfs_fs_info *fs_info,
			  struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);

	ASSERT(folio_test_locked(folio));

	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, folio->mapping)) {
		folio_unlock(folio);
		return;
	}

	/*
	 * For the subpage case, there are two types of locked page: with or
	 * without a locked number.
	 *
	 * Since we own the page lock, no one else could touch subpage::locked
	 * and we are safe to do several atomic operations without spinlock.
	 */
	if (atomic_read(&subpage->nr_locked) == 0) {
		/* No subpage lock, locked by plain lock_page(). */
		folio_unlock(folio);
		return;
	}

	btrfs_subpage_clamp_range(folio, &start, &len);
	if (btrfs_subpage_end_and_test_lock(fs_info, folio, start, len))
		folio_unlock(folio);
}
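
/*
 * A sketch of the intended lock pairing (hypothetical caller, shown only for
 * illustration): a range is recorded with btrfs_folio_set_lock() (defined
 * near the end of this file) and released with btrfs_folio_end_lock(); the
 * folio itself is only unlocked when the last locked range is ended.
 *
 *	folio_lock(folio);
 *	btrfs_folio_set_lock(fs_info, folio, start, len);
 *	... submit IO for [start, start + len) ...
 *	btrfs_folio_end_lock(fs_info, folio, start, len);
 */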
void btrfs_folio_end_lock_bitmap(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, unsigned long bitmap)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	const int start_bit = fs_info->sectors_per_page * btrfs_bitmap_nr_locked;
	unsigned long flags;
	bool last = false;
	int cleared = 0;
	int bit;

	if (!btrfs_is_subpage(fs_info, folio->mapping)) {
		folio_unlock(folio);
		return;
	}

	if (atomic_read(&subpage->nr_locked) == 0) {
		/* No subpage lock, locked by plain lock_page(). */
		folio_unlock(folio);
		return;
	}

	spin_lock_irqsave(&subpage->lock, flags);
	for_each_set_bit(bit, &bitmap, fs_info->sectors_per_page) {
		if (test_and_clear_bit(bit + start_bit, subpage->bitmaps))
			cleared++;
	}
	ASSERT(atomic_read(&subpage->nr_locked) >= cleared);
	last = atomic_sub_and_test(cleared, &subpage->nr_locked);
	spin_unlock_irqrestore(&subpage->lock, flags);
	if (last)
		folio_unlock(folio);
}
#define subpage_test_bitmap_all_set(fs_info, subpage, name)		\
	bitmap_test_range_all_set(subpage->bitmaps,			\
			fs_info->sectors_per_page * btrfs_bitmap_nr_##name, \
			fs_info->sectors_per_page)

#define subpage_test_bitmap_all_zero(fs_info, subpage, name)		\
	bitmap_test_range_all_zero(subpage->bitmaps,			\
			fs_info->sectors_per_page * btrfs_bitmap_nr_##name, \
			fs_info->sectors_per_page)
void btrfs_subpage_set_uptodate(const struct btrfs_fs_info *fs_info,
				struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							uptodate, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_set(fs_info, subpage, uptodate))
		folio_mark_uptodate(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}
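
/*
 * Illustration of the aggregation above (assumed numbers): with 16 sectors
 * per 64K page, fifteen 4K reads leave the folio !uptodate; only the
 * sixteenth call makes subpage_test_bitmap_all_set() true and fires
 * folio_mark_uptodate(). The clear side below is deliberately asymmetric:
 * clearing any single sector immediately clears the folio flag.
 */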
void btrfs_subpage_clear_uptodate(const struct btrfs_fs_info *fs_info,
				  struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							uptodate, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	folio_clear_uptodate(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}
void btrfs_subpage_set_dirty(const struct btrfs_fs_info *fs_info,
			     struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							dirty, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	spin_unlock_irqrestore(&subpage->lock, flags);
	folio_mark_dirty(folio);
}
/*
 * Extra clear_and_test function for the subpage dirty bitmap.
 *
 * Return true if the cleared bits were the last set bits in the dirty_bitmap.
 * Return false otherwise.
 *
 * NOTE: Callers should manually clear page dirty for the true case, as we
 * have extra handling for tree blocks.
 */
bool btrfs_subpage_clear_and_test_dirty(const struct btrfs_fs_info *fs_info,
					struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							dirty, start, len);
	unsigned long flags;
	bool last = false;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_zero(fs_info, subpage, dirty))
		last = true;
	spin_unlock_irqrestore(&subpage->lock, flags);
	return last;
}
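
/*
 * A sketch of the expected caller pattern (illustrative only): clear the
 * subpage bits first, then drop the folio-level dirty flag only once the
 * last dirty sector is gone, which is exactly what the wrapper below does:
 *
 *	if (btrfs_subpage_clear_and_test_dirty(fs_info, folio, start, len))
 *		folio_clear_dirty_for_io(folio);
 */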
void btrfs_subpage_clear_dirty(const struct btrfs_fs_info *fs_info,
			       struct folio *folio, u64 start, u32 len)
{
	bool last;

	last = btrfs_subpage_clear_and_test_dirty(fs_info, folio, start, len);
	if (last)
		folio_clear_dirty_for_io(folio);
}
void btrfs_subpage_set_writeback(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							writeback, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (!folio_test_writeback(folio))
		folio_start_writeback(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}
void btrfs_subpage_clear_writeback(const struct btrfs_fs_info *fs_info,
				   struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							writeback, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_zero(fs_info, subpage, writeback)) {
		ASSERT(folio_test_writeback(folio));
		folio_end_writeback(folio);
	}
	spin_unlock_irqrestore(&subpage->lock, flags);
}
void btrfs_subpage_set_ordered(const struct btrfs_fs_info *fs_info,
			       struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							ordered, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	folio_set_ordered(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}
void btrfs_subpage_clear_ordered(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							ordered, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_zero(fs_info, subpage, ordered))
		folio_clear_ordered(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}
void btrfs_subpage_set_checked(const struct btrfs_fs_info *fs_info,
			       struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							checked, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_set(fs_info, subpage, checked))
		folio_set_checked(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}
void btrfs_subpage_clear_checked(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							checked, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	folio_clear_checked(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}
/*
 * Unlike set/clear, which depends on each page status, for test all bits
 * are tested in the same way.
 */
#define IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(name)				\
bool btrfs_subpage_test_##name(const struct btrfs_fs_info *fs_info,	\
			       struct folio *folio, u64 start, u32 len)	\
{									\
	struct btrfs_subpage *subpage = folio_get_private(folio);	\
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,	\
						name, start, len);	\
	unsigned long flags;						\
	bool ret;							\
									\
	spin_lock_irqsave(&subpage->lock, flags);			\
	ret = bitmap_test_range_all_set(subpage->bitmaps, start_bit,	\
				len >> fs_info->sectorsize_bits);	\
	spin_unlock_irqrestore(&subpage->lock, flags);			\
	return ret;							\
}
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(uptodate);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(dirty);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(writeback);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(ordered);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(checked);
/*
 * Note that, in selftests (extent-io-tests), we can have a NULL fs_info
 * passed in. We only test sectorsize == PAGE_SIZE cases so far, thus we can
 * fall back to the regular sectorsize branch.
 */
#define IMPLEMENT_BTRFS_PAGE_OPS(name, folio_set_func,			\
				 folio_clear_func, folio_test_func)	\
void btrfs_folio_set_##name(const struct btrfs_fs_info *fs_info,	\
			    struct folio *folio, u64 start, u32 len)	\
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio->mapping)) {		\
		folio_set_func(folio);					\
		return;							\
	}								\
	btrfs_subpage_set_##name(fs_info, folio, start, len);		\
}									\
void btrfs_folio_clear_##name(const struct btrfs_fs_info *fs_info,	\
			      struct folio *folio, u64 start, u32 len)	\
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio->mapping)) {		\
		folio_clear_func(folio);				\
		return;							\
	}								\
	btrfs_subpage_clear_##name(fs_info, folio, start, len);	\
}									\
bool btrfs_folio_test_##name(const struct btrfs_fs_info *fs_info,	\
			     struct folio *folio, u64 start, u32 len)	\
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio->mapping))			\
		return folio_test_func(folio);				\
	return btrfs_subpage_test_##name(fs_info, folio, start, len);	\
}									\
void btrfs_folio_clamp_set_##name(const struct btrfs_fs_info *fs_info,	\
				  struct folio *folio, u64 start, u32 len) \
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio->mapping)) {		\
		folio_set_func(folio);					\
		return;							\
	}								\
	btrfs_subpage_clamp_range(folio, &start, &len);			\
	btrfs_subpage_set_##name(fs_info, folio, start, len);		\
}									\
void btrfs_folio_clamp_clear_##name(const struct btrfs_fs_info *fs_info, \
				    struct folio *folio, u64 start, u32 len) \
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio->mapping)) {		\
		folio_clear_func(folio);				\
		return;							\
	}								\
	btrfs_subpage_clamp_range(folio, &start, &len);			\
	btrfs_subpage_clear_##name(fs_info, folio, start, len);	\
}									\
bool btrfs_folio_clamp_test_##name(const struct btrfs_fs_info *fs_info, \
				   struct folio *folio, u64 start, u32 len) \
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio->mapping))			\
		return folio_test_func(folio);				\
	btrfs_subpage_clamp_range(folio, &start, &len);			\
	return btrfs_subpage_test_##name(fs_info, folio, start, len);	\
}
IMPLEMENT_BTRFS_PAGE_OPS(uptodate, folio_mark_uptodate, folio_clear_uptodate,
			 folio_test_uptodate);
IMPLEMENT_BTRFS_PAGE_OPS(dirty, folio_mark_dirty, folio_clear_dirty_for_io,
			 folio_test_dirty);
IMPLEMENT_BTRFS_PAGE_OPS(writeback, folio_start_writeback, folio_end_writeback,
			 folio_test_writeback);
IMPLEMENT_BTRFS_PAGE_OPS(ordered, folio_set_ordered, folio_clear_ordered,
			 folio_test_ordered);
IMPLEMENT_BTRFS_PAGE_OPS(checked, folio_set_checked, folio_clear_checked,
			 folio_test_checked);
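
/*
 * For reference, an illustration of what the macro above generates for
 * name == dirty (hand-expanded, not part of the build):
 *
 *	void btrfs_folio_set_dirty(const struct btrfs_fs_info *fs_info,
 *				   struct folio *folio, u64 start, u32 len)
 *	{
 *		if (unlikely(!fs_info) ||
 *		    !btrfs_is_subpage(fs_info, folio->mapping)) {
 *			folio_mark_dirty(folio);
 *			return;
 *		}
 *		btrfs_subpage_set_dirty(fs_info, folio, start, len);
 *	}
 *
 * Callers can therefore stay oblivious to whether the filesystem is subpage.
 */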
/*
 * Make sure not only the page dirty bit is cleared, but also the subpage
 * dirty bit is cleared.
 */
void btrfs_folio_assert_not_dirty(const struct btrfs_fs_info *fs_info,
				  struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage;
	unsigned int start_bit;
	unsigned int nbits;
	unsigned long flags;

	if (!IS_ENABLED(CONFIG_BTRFS_ASSERT))
		return;

	if (!btrfs_is_subpage(fs_info, folio->mapping)) {
		ASSERT(!folio_test_dirty(folio));
		return;
	}

	start_bit = subpage_calc_start_bit(fs_info, folio, dirty, start, len);
	nbits = len >> fs_info->sectorsize_bits;
	subpage = folio_get_private(folio);

	spin_lock_irqsave(&subpage->lock, flags);
	ASSERT(bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits));
	spin_unlock_irqrestore(&subpage->lock, flags);
}
/*
 * This is for folios already locked by plain lock_page()/folio_lock(), which
 * doesn't have any subpage awareness.
 *
 * This populates the involved subpage ranges so that subpage helpers can
 * properly unlock them.
 */
void btrfs_folio_set_lock(const struct btrfs_fs_info *fs_info,
			  struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage;
	unsigned int start_bit;
	unsigned int nbits;
	unsigned long flags;
	int ret;

	ASSERT(folio_test_locked(folio));
	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, folio->mapping))
		return;

	subpage = folio_get_private(folio);
	start_bit = subpage_calc_start_bit(fs_info, folio, locked, start, len);
	nbits = len >> fs_info->sectorsize_bits;
	spin_lock_irqsave(&subpage->lock, flags);
	/* Target range should not yet be locked. */
	ASSERT(bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits));
	bitmap_set(subpage->bitmaps, start_bit, nbits);
	ret = atomic_add_return(nbits, &subpage->nr_locked);
	ASSERT(ret <= fs_info->sectors_per_page);
	spin_unlock_irqrestore(&subpage->lock, flags);
}
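
/*
 * A worked example of the accounting above (assumed numbers): locking a 16K
 * range on a 4K-sector subpage folio sets four bits in the locked region and
 * bumps nr_locked by 4. Two later btrfs_folio_end_lock() calls of 8K each
 * drop nr_locked to 2 and then to 0; only the final call, which brings the
 * count to zero, actually unlocks the folio.
 */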
#define GET_SUBPAGE_BITMAP(subpage, fs_info, name, dst)			\
{									\
	const int sectors_per_page = fs_info->sectors_per_page;	\
									\
	ASSERT(sectors_per_page < BITS_PER_LONG);			\
	*dst = bitmap_read(subpage->bitmaps,				\
			   sectors_per_page * btrfs_bitmap_nr_##name,	\
			   sectors_per_page);				\
}
void __cold btrfs_subpage_dump_bitmap(const struct btrfs_fs_info *fs_info,
				      struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage;
	const u32 sectors_per_page = fs_info->sectors_per_page;
	unsigned long uptodate_bitmap;
	unsigned long dirty_bitmap;
	unsigned long locked_bitmap;
	unsigned long writeback_bitmap;
	unsigned long ordered_bitmap;
	unsigned long checked_bitmap;
	unsigned long flags;

	ASSERT(folio_test_private(folio) && folio_get_private(folio));
	ASSERT(sectors_per_page > 1);
	subpage = folio_get_private(folio);

	spin_lock_irqsave(&subpage->lock, flags);
	GET_SUBPAGE_BITMAP(subpage, fs_info, uptodate, &uptodate_bitmap);
	GET_SUBPAGE_BITMAP(subpage, fs_info, dirty, &dirty_bitmap);
	GET_SUBPAGE_BITMAP(subpage, fs_info, writeback, &writeback_bitmap);
	GET_SUBPAGE_BITMAP(subpage, fs_info, ordered, &ordered_bitmap);
	GET_SUBPAGE_BITMAP(subpage, fs_info, checked, &checked_bitmap);
	GET_SUBPAGE_BITMAP(subpage, fs_info, locked, &locked_bitmap);
	spin_unlock_irqrestore(&subpage->lock, flags);

	dump_page(folio_page(folio, 0), "btrfs subpage dump");
	btrfs_warn(fs_info,
"start=%llu len=%u page=%llu, bitmaps uptodate=%*pbl dirty=%*pbl locked=%*pbl writeback=%*pbl ordered=%*pbl checked=%*pbl",
		    start, len, folio_pos(folio),
		    sectors_per_page, &uptodate_bitmap,
		    sectors_per_page, &dirty_bitmap,
		    sectors_per_page, &locked_bitmap,
		    sectors_per_page, &writeback_bitmap,
		    sectors_per_page, &ordered_bitmap,
		    sectors_per_page, &checked_bitmap);
}
void btrfs_get_subpage_dirty_bitmap(struct btrfs_fs_info *fs_info,
				    struct folio *folio,
				    unsigned long *ret_bitmap)
{
	struct btrfs_subpage *subpage;
	unsigned long flags;

	ASSERT(folio_test_private(folio) && folio_get_private(folio));
	ASSERT(fs_info->sectors_per_page > 1);
	subpage = folio_get_private(folio);

	spin_lock_irqsave(&subpage->lock, flags);
	GET_SUBPAGE_BITMAP(subpage, fs_info, dirty, ret_bitmap);
	spin_unlock_irqrestore(&subpage->lock, flags);
}