// SPDX-License-Identifier: GPL-2.0

#include <linux/slab.h>
#include "messages.h"
#include "ctree.h"
#include "subpage.h"
#include "btrfs_inode.h"

/*
 * Subpage (sectorsize < PAGE_SIZE) support overview:
 *
 * Limitations:
 *
 * - Only support 64K page size for now
 *   This is to make metadata handling easier, as a 64K page would ensure
 *   all nodesizes fit inside one page, thus we don't need to handle
 *   cases where a tree block crosses several pages.
 *
 * - Only metadata read-write for now
 *   The data read-write part is in development.
 *
 * - Metadata can't cross 64K page boundary
 *   btrfs-progs and kernel have done that for a while, thus only ancient
 *   filesystems could have such problem.  For such case, do a graceful
 *   rejection.
 *
 * Special behavior:
 *
 * - Metadata
 *   Metadata read is fully supported.
 *   Meaning reading one tree block will only trigger the read for the
 *   needed range; other unrelated ranges in the same page will not be touched.
 *
 *   Metadata write support is partial.
 *   The writeback is still for the full page, but we will only submit
 *   the dirty extent buffers in the page.
 *
 *   This means, if we have a metadata page like this:
 *
 *   Page offset
 *   0         16K         32K         48K        64K
 *   |/////////|           |///////////|
 *        \- Tree block A        \- Tree block B
 *
 *   Even if we just want to writeback tree block A, we will also writeback
 *   tree block B if it's also dirty.
 *
 *   This may cause extra metadata writeback which results in more COW.
 *
 * Implementation:
 *
 * - Common
 *   Both metadata and data will use a new structure, btrfs_subpage, to
 *   record the status of each sector inside a page.  This provides the extra
 *   granularity needed.
 *
 * - Metadata
 *   Since we have multiple tree blocks inside one page, we can't rely on page
 *   locking anymore, or we will have greatly reduced concurrency or even
 *   deadlocks (hold one tree lock while trying to lock another tree lock in
 *   the same page).
 *
 *   Thus for metadata locking, subpage support relies on io_tree locking only.
 *   This means a slightly higher tree locking latency.
 */

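/*
 * An illustrative sketch of the per-folio state layout (the numbers are an
 * assumption for illustration: a 64K page with 4K sectorsize, i.e.
 * sectors_per_page == 16):
 *
 *	subpage->bitmaps packs one bitmap section per state, back to back:
 *
 *	[ uptodate: bits 0-15 ][ dirty: bits 16-31 ][ next state: ... ] ...
 *
 *	So sector N (0-15) of state "name" lives at bit:
 *		N + sectors_per_page * btrfs_bitmap_nr_##name
 *
 * The exact ordering of the sections is defined by the btrfs_bitmap_nr_*
 * enum in subpage.h.
 */
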
bool btrfs_is_subpage(const struct btrfs_fs_info *fs_info, struct address_space *mapping)
{
	if (fs_info->sectorsize >= PAGE_SIZE)
		return false;

	/*
	 * Only data pages (either through DIO or compression) can have no
	 * mapping. And if page->mapping->host is a data inode, it's subpage.
	 * We have already ruled out the sectorsize >= PAGE_SIZE case above.
	 */
	if (!mapping || !mapping->host || is_data_inode(BTRFS_I(mapping->host)))
		return true;

	/*
	 * Now the only remaining case is metadata, which goes the subpage
	 * routine only if nodesize < PAGE_SIZE.
	 */
	if (fs_info->nodesize < PAGE_SIZE)
		return true;
	return false;
}

int btrfs_attach_subpage(const struct btrfs_fs_info *fs_info,
			 struct folio *folio, enum btrfs_subpage_type type)
{
	struct btrfs_subpage *subpage;

	/*
	 * We have cases like a dummy extent buffer page, which is not mapped
	 * and doesn't need to be locked.
	 */
	if (folio->mapping)
		ASSERT(folio_test_locked(folio));

	/* Either not subpage, or the folio already has private attached. */
	if (!btrfs_is_subpage(fs_info, folio->mapping) || folio_test_private(folio))
		return 0;

	subpage = btrfs_alloc_subpage(fs_info, type);
	if (IS_ERR(subpage))
		return PTR_ERR(subpage);

	folio_attach_private(folio, subpage);
	return 0;
}

void btrfs_detach_subpage(const struct btrfs_fs_info *fs_info, struct folio *folio)
{
	struct btrfs_subpage *subpage;

	/* Either not subpage, or the folio doesn't have private attached. */
	if (!btrfs_is_subpage(fs_info, folio->mapping) || !folio_test_private(folio))
		return;

	subpage = folio_detach_private(folio);
	btrfs_free_subpage(subpage);
}

struct btrfs_subpage *btrfs_alloc_subpage(const struct btrfs_fs_info *fs_info,
					  enum btrfs_subpage_type type)
{
	struct btrfs_subpage *ret;
	unsigned int real_size;

	ASSERT(fs_info->sectorsize < PAGE_SIZE);

	real_size = struct_size(ret, bitmaps,
			BITS_TO_LONGS(btrfs_bitmap_nr_max * fs_info->sectors_per_page));
	ret = kzalloc(real_size, GFP_NOFS);
	if (!ret)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&ret->lock);
	if (type == BTRFS_SUBPAGE_METADATA) {
		atomic_set(&ret->eb_refs, 0);
	} else {
		atomic_set(&ret->readers, 0);
		atomic_set(&ret->writers, 0);
	}
	return ret;
}

void btrfs_free_subpage(struct btrfs_subpage *subpage)
{
	kfree(subpage);
}

/*
 * Increase the eb_refs of current subpage.
 *
 * This is important for eb allocation, to prevent race with last eb freeing
 * of the same page.
 * With the eb_refs increased before the eb inserted into the radix tree,
 * detach_extent_buffer_page() won't detach the folio private while we're still
 * allocating the extent buffer.
 */
void btrfs_folio_inc_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio)
{
	struct btrfs_subpage *subpage;

	if (!btrfs_is_subpage(fs_info, folio->mapping))
		return;

	ASSERT(folio_test_private(folio) && folio->mapping);
	lockdep_assert_held(&folio->mapping->i_private_lock);

	subpage = folio_get_private(folio);
	atomic_inc(&subpage->eb_refs);
}

void btrfs_folio_dec_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio)
{
	struct btrfs_subpage *subpage;

	if (!btrfs_is_subpage(fs_info, folio->mapping))
		return;

	ASSERT(folio_test_private(folio) && folio->mapping);
	lockdep_assert_held(&folio->mapping->i_private_lock);

	subpage = folio_get_private(folio);
	ASSERT(atomic_read(&subpage->eb_refs));
	atomic_dec(&subpage->eb_refs);
}

static void btrfs_subpage_assert(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, u64 start, u32 len)
{
	/* For subpage support, the folio must be a single page. */
	ASSERT(folio_order(folio) == 0);

	/* Basic checks */
	ASSERT(folio_test_private(folio) && folio_get_private(folio));
	ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
	       IS_ALIGNED(len, fs_info->sectorsize));
	/*
	 * The range check only works for a mapped page; we can still have
	 * unmapped pages like dummy extent buffer pages.
	 */
	if (folio->mapping)
		ASSERT(folio_pos(folio) <= start &&
		       start + len <= folio_pos(folio) + PAGE_SIZE);
}

#define subpage_calc_start_bit(fs_info, folio, name, start, len)	\
({									\
	unsigned int __start_bit;					\
									\
	btrfs_subpage_assert(fs_info, folio, start, len);		\
	__start_bit = offset_in_page(start) >> fs_info->sectorsize_bits; \
	__start_bit += fs_info->sectors_per_page * btrfs_bitmap_nr_##name; \
	__start_bit;							\
})

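/*
 * A worked example (illustrative numbers, not extra code): with a 64K page
 * and 4K sectorsize, sectorsize_bits == 12 and sectors_per_page == 16.  For
 * the dirty bit of the sector at offset 8K inside the folio:
 *
 *	offset_in_page(start) >> sectorsize_bits == 8K >> 12 == 2
 *	__start_bit = 2 + 16 * btrfs_bitmap_nr_dirty
 *
 * i.e. the third bit inside the "dirty" section of subpage->bitmaps.
 */
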
void btrfs_subpage_start_reader(const struct btrfs_fs_info *fs_info,
				struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	const int start_bit = subpage_calc_start_bit(fs_info, folio, locked, start, len);
	const int nbits = len >> fs_info->sectorsize_bits;
	unsigned long flags;

	btrfs_subpage_assert(fs_info, folio, start, len);

	spin_lock_irqsave(&subpage->lock, flags);
	/*
	 * Even though it's just for reading the page, no one should have
	 * locked the subpage range.
	 */
	ASSERT(bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits));
	bitmap_set(subpage->bitmaps, start_bit, nbits);
	atomic_add(nbits, &subpage->readers);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_end_reader(const struct btrfs_fs_info *fs_info,
			      struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	const int start_bit = subpage_calc_start_bit(fs_info, folio, locked, start, len);
	const int nbits = len >> fs_info->sectorsize_bits;
	unsigned long flags;
	bool is_data;
	bool last;

	btrfs_subpage_assert(fs_info, folio, start, len);
	is_data = is_data_inode(BTRFS_I(folio->mapping->host));

	spin_lock_irqsave(&subpage->lock, flags);

	/* The range should have already been locked. */
	ASSERT(bitmap_test_range_all_set(subpage->bitmaps, start_bit, nbits));
	ASSERT(atomic_read(&subpage->readers) >= nbits);

	bitmap_clear(subpage->bitmaps, start_bit, nbits);
	last = atomic_sub_and_test(nbits, &subpage->readers);

	/*
	 * For data we need to unlock the page if the last read has finished.
	 *
	 * Don't replace @last with an atomic_sub_and_test() call inside the
	 * if () condition, as we want atomic_sub_and_test() to always be
	 * executed.
	 */
	if (is_data && last)
		folio_unlock(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

static void btrfs_subpage_clamp_range(struct folio *folio, u64 *start, u32 *len)
{
	u64 orig_start = *start;
	u32 orig_len = *len;

	*start = max_t(u64, folio_pos(folio), orig_start);
	/*
	 * For certain call sites like btrfs_drop_pages(), we may have pages
	 * beyond the target range. In that case, just set @len to 0; subpage
	 * helpers can handle @len == 0 without any problem.
	 */
	if (folio_pos(folio) >= orig_start + orig_len)
		*len = 0;
	else
		*len = min_t(u64, folio_pos(folio) + PAGE_SIZE,
			     orig_start + orig_len) - *start;
}

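/*
 * A worked clamp example (illustrative numbers, assuming 64K pages): for a
 * folio at file offset 64K and a range [60K, 72K), the clamped result is
 * start = max(64K, 60K) = 64K and len = min(128K, 72K) - 64K = 8K, i.e. only
 * the part of the range that overlaps this folio.
 */
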
static void btrfs_subpage_start_writer(const struct btrfs_fs_info *fs_info,
				       struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	const int start_bit = subpage_calc_start_bit(fs_info, folio, locked, start, len);
	const int nbits = (len >> fs_info->sectorsize_bits);
	unsigned long flags;
	int ret;

	btrfs_subpage_assert(fs_info, folio, start, len);

	spin_lock_irqsave(&subpage->lock, flags);
	ASSERT(atomic_read(&subpage->readers) == 0);
	ASSERT(bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits));
	bitmap_set(subpage->bitmaps, start_bit, nbits);
	ret = atomic_add_return(nbits, &subpage->writers);
	ASSERT(ret == nbits);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

static bool btrfs_subpage_end_and_test_writer(const struct btrfs_fs_info *fs_info,
					      struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	const int start_bit = subpage_calc_start_bit(fs_info, folio, locked, start, len);
	const int nbits = (len >> fs_info->sectorsize_bits);
	unsigned long flags;
	unsigned int cleared = 0;
	int bit = start_bit;
	bool last;

	btrfs_subpage_assert(fs_info, folio, start, len);

	spin_lock_irqsave(&subpage->lock, flags);
	/*
	 * We have call sites passing @locked_page into
	 * extent_clear_unlock_delalloc() for the compression path.
	 *
	 * This @locked_page is locked by plain lock_page(), thus its
	 * subpage::writers is 0.  Handle it in a special way.
	 */
	if (atomic_read(&subpage->writers) == 0) {
		spin_unlock_irqrestore(&subpage->lock, flags);
		return true;
	}

	for_each_set_bit_from(bit, subpage->bitmaps, start_bit + nbits) {
		clear_bit(bit, subpage->bitmaps);
		cleared++;
	}
	ASSERT(atomic_read(&subpage->writers) >= cleared);
	last = atomic_sub_and_test(cleared, &subpage->writers);
	spin_unlock_irqrestore(&subpage->lock, flags);
	return last;
}

/*
 * Lock a folio for delalloc page writeback.
 *
 * Return -EAGAIN if the page is not properly initialized.
 * Return 0 with the page locked, and the writer counter updated.
 *
 * Even with 0 returned, the page still needs an extra check to make sure
 * it's really the correct page, as the caller is using
 * filemap_get_folios_contig(), which can race with page invalidation.
 */
int btrfs_folio_start_writer_lock(const struct btrfs_fs_info *fs_info,
				  struct folio *folio, u64 start, u32 len)
{
	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, folio->mapping)) {
		folio_lock(folio);
		return 0;
	}
	folio_lock(folio);
	if (!folio_test_private(folio) || !folio_get_private(folio)) {
		folio_unlock(folio);
		return -EAGAIN;
	}
	btrfs_subpage_clamp_range(folio, &start, &len);
	btrfs_subpage_start_writer(fs_info, folio, start, len);
	return 0;
}

/*
 * Handle different locked folios:
 *
 * - Non-subpage folio
 *   Just unlock it.
 *
 * - folio locked but without any subpage range locked
 *   This happens either before writepage_delalloc(), or because the delalloc
 *   range was already handled by a previous folio.
 *   We can simply unlock it.
 *
 * - folio locked with subpage range locked
 *   We go through the locked sectors inside the range, clear their locked
 *   bitmap bits, reduce the writer lock number, and unlock the page if that's
 *   the last locked range.
 */
void btrfs_folio_end_writer_lock(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);

	ASSERT(folio_test_locked(folio));

	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, folio->mapping)) {
		folio_unlock(folio);
		return;
	}

	/*
	 * For the subpage case, there are two types of locked page: with or
	 * without a writers number.
	 *
	 * Since we own the page lock, no one else could touch subpage::writers
	 * and we are safe to do several atomic operations without spinlock.
	 */
	if (atomic_read(&subpage->writers) == 0) {
		/* No writers, locked by plain lock_page(). */
		folio_unlock(folio);
		return;
	}

	btrfs_subpage_clamp_range(folio, &start, &len);
	if (btrfs_subpage_end_and_test_writer(fs_info, folio, start, len))
		folio_unlock(folio);
}

void btrfs_folio_end_writer_lock_bitmap(const struct btrfs_fs_info *fs_info,
					struct folio *folio, unsigned long bitmap)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	const int start_bit = fs_info->sectors_per_page * btrfs_bitmap_nr_locked;
	unsigned long flags;
	bool last = false;
	int cleared = 0;
	int bit;

	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, folio->mapping)) {
		folio_unlock(folio);
		return;
	}

	if (atomic_read(&subpage->writers) == 0) {
		/* No writers, locked by plain lock_page(). */
		folio_unlock(folio);
		return;
	}

	spin_lock_irqsave(&subpage->lock, flags);
	for_each_set_bit(bit, &bitmap, fs_info->sectors_per_page) {
		if (test_and_clear_bit(bit + start_bit, subpage->bitmaps))
			cleared++;
	}
	ASSERT(atomic_read(&subpage->writers) >= cleared);
	last = atomic_sub_and_test(cleared, &subpage->writers);
	spin_unlock_irqrestore(&subpage->lock, flags);
	if (last)
		folio_unlock(folio);
}

#define subpage_test_bitmap_all_set(fs_info, subpage, name)		\
	bitmap_test_range_all_set(subpage->bitmaps,			\
			fs_info->sectors_per_page * btrfs_bitmap_nr_##name, \
			fs_info->sectors_per_page)

#define subpage_test_bitmap_all_zero(fs_info, subpage, name)		\
	bitmap_test_range_all_zero(subpage->bitmaps,			\
			fs_info->sectors_per_page * btrfs_bitmap_nr_##name, \
			fs_info->sectors_per_page)

void btrfs_subpage_set_uptodate(const struct btrfs_fs_info *fs_info,
				struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							uptodate, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_set(fs_info, subpage, uptodate))
		folio_mark_uptodate(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_uptodate(const struct btrfs_fs_info *fs_info,
				  struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							uptodate, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	folio_clear_uptodate(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_set_dirty(const struct btrfs_fs_info *fs_info,
			     struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							dirty, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	spin_unlock_irqrestore(&subpage->lock, flags);
	folio_mark_dirty(folio);
}

/*
 * Extra clear_and_test function for the subpage dirty bitmap.
 *
 * Return true if we're the last bits in the dirty_bitmap and clear the
 * dirty_bitmap.
 * Return false otherwise.
 *
 * NOTE: Callers should manually clear page dirty for the true case, as we
 * have extra handling for tree blocks.
 */
bool btrfs_subpage_clear_and_test_dirty(const struct btrfs_fs_info *fs_info,
					struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							dirty, start, len);
	unsigned long flags;
	bool last = false;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_zero(fs_info, subpage, dirty))
		last = true;
	spin_unlock_irqrestore(&subpage->lock, flags);
	return last;
}

void btrfs_subpage_clear_dirty(const struct btrfs_fs_info *fs_info,
			       struct folio *folio, u64 start, u32 len)
{
	bool last;

	last = btrfs_subpage_clear_and_test_dirty(fs_info, folio, start, len);
	if (last)
		folio_clear_dirty_for_io(folio);
}

void btrfs_subpage_set_writeback(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							writeback, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (!folio_test_writeback(folio))
		folio_start_writeback(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_writeback(const struct btrfs_fs_info *fs_info,
				   struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							writeback, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_zero(fs_info, subpage, writeback)) {
		ASSERT(folio_test_writeback(folio));
		folio_end_writeback(folio);
	}
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_set_ordered(const struct btrfs_fs_info *fs_info,
			       struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							ordered, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	folio_set_ordered(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_ordered(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							ordered, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_zero(fs_info, subpage, ordered))
		folio_clear_ordered(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_set_checked(const struct btrfs_fs_info *fs_info,
			       struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							checked, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_set(fs_info, subpage, checked))
		folio_set_checked(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_checked(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							checked, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	folio_clear_checked(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

/*
 * Unlike set/clear, which is dependent on each page status, for test all bits
 * are tested in the same way.
 */
#define IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(name)				\
bool btrfs_subpage_test_##name(const struct btrfs_fs_info *fs_info,	\
			       struct folio *folio, u64 start, u32 len)	\
{									\
	struct btrfs_subpage *subpage = folio_get_private(folio);	\
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,	\
						name, start, len);	\
	unsigned long flags;						\
	bool ret;							\
									\
	spin_lock_irqsave(&subpage->lock, flags);			\
	ret = bitmap_test_range_all_set(subpage->bitmaps, start_bit,	\
				len >> fs_info->sectorsize_bits);	\
	spin_unlock_irqrestore(&subpage->lock, flags);			\
	return ret;							\
}
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(uptodate);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(dirty);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(writeback);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(ordered);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(checked);

/*
 * Note that, in selftests (extent-io-tests), we can have empty fs_info passed
 * in.  We only test sectorsize == PAGE_SIZE cases so far, thus we can fall
 * back to the regular sectorsize branch.
 */
#define IMPLEMENT_BTRFS_PAGE_OPS(name, folio_set_func,			\
				 folio_clear_func, folio_test_func)	\
void btrfs_folio_set_##name(const struct btrfs_fs_info *fs_info,	\
			    struct folio *folio, u64 start, u32 len)	\
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio->mapping)) {		\
		folio_set_func(folio);					\
		return;							\
	}								\
	btrfs_subpage_set_##name(fs_info, folio, start, len);		\
}									\
void btrfs_folio_clear_##name(const struct btrfs_fs_info *fs_info,	\
			      struct folio *folio, u64 start, u32 len)	\
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio->mapping)) {		\
		folio_clear_func(folio);				\
		return;							\
	}								\
	btrfs_subpage_clear_##name(fs_info, folio, start, len);	\
}									\
bool btrfs_folio_test_##name(const struct btrfs_fs_info *fs_info,	\
			     struct folio *folio, u64 start, u32 len)	\
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio->mapping))			\
		return folio_test_func(folio);				\
	return btrfs_subpage_test_##name(fs_info, folio, start, len);	\
}									\
void btrfs_folio_clamp_set_##name(const struct btrfs_fs_info *fs_info, \
				  struct folio *folio, u64 start, u32 len) \
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio->mapping)) {		\
		folio_set_func(folio);					\
		return;							\
	}								\
	btrfs_subpage_clamp_range(folio, &start, &len);			\
	btrfs_subpage_set_##name(fs_info, folio, start, len);		\
}									\
void btrfs_folio_clamp_clear_##name(const struct btrfs_fs_info *fs_info, \
				    struct folio *folio, u64 start, u32 len) \
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio->mapping)) {		\
		folio_clear_func(folio);				\
		return;							\
	}								\
	btrfs_subpage_clamp_range(folio, &start, &len);			\
	btrfs_subpage_clear_##name(fs_info, folio, start, len);	\
}									\
bool btrfs_folio_clamp_test_##name(const struct btrfs_fs_info *fs_info, \
				   struct folio *folio, u64 start, u32 len) \
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio->mapping))			\
		return folio_test_func(folio);				\
	btrfs_subpage_clamp_range(folio, &start, &len);			\
	return btrfs_subpage_test_##name(fs_info, folio, start, len);	\
}
IMPLEMENT_BTRFS_PAGE_OPS(uptodate, folio_mark_uptodate, folio_clear_uptodate,
			 folio_test_uptodate);
IMPLEMENT_BTRFS_PAGE_OPS(dirty, folio_mark_dirty, folio_clear_dirty_for_io,
			 folio_test_dirty);
IMPLEMENT_BTRFS_PAGE_OPS(writeback, folio_start_writeback, folio_end_writeback,
			 folio_test_writeback);
IMPLEMENT_BTRFS_PAGE_OPS(ordered, folio_set_ordered, folio_clear_ordered,
			 folio_test_ordered);
IMPLEMENT_BTRFS_PAGE_OPS(checked, folio_set_checked, folio_clear_checked,
			 folio_test_checked);

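/*
 * For illustration (a hypothetical call site, not part of this file):
 * marking one sector dirty goes through the generated helper, which picks
 * the subpage or full-folio path automatically:
 *
 *	btrfs_folio_set_dirty(fs_info, folio, cur, fs_info->sectorsize);
 *
 * Here @cur is an assumed file offset inside the folio.  In the
 * non-subpage case this is just folio_mark_dirty(folio).
 */
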
/*
 * Make sure not only the page dirty bit is cleared, but also the subpage
 * dirty bits are cleared.
 */
void btrfs_folio_assert_not_dirty(const struct btrfs_fs_info *fs_info,
				  struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage;
	unsigned int start_bit;
	unsigned int nbits;
	unsigned long flags;

	if (!IS_ENABLED(CONFIG_BTRFS_ASSERT))
		return;

	if (!btrfs_is_subpage(fs_info, folio->mapping)) {
		ASSERT(!folio_test_dirty(folio));
		return;
	}

	start_bit = subpage_calc_start_bit(fs_info, folio, dirty, start, len);
	nbits = len >> fs_info->sectorsize_bits;
	subpage = folio_get_private(folio);
	ASSERT(subpage);
	spin_lock_irqsave(&subpage->lock, flags);
	ASSERT(bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits));
	spin_unlock_irqrestore(&subpage->lock, flags);
}

/*
 * This is for folios already locked by plain lock_page()/folio_lock(), which
 * doesn't have any subpage awareness.
 *
 * This populates the involved subpage ranges so that subpage helpers can
 * properly unlock them.
 */
void btrfs_folio_set_writer_lock(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage;
	unsigned long flags;
	unsigned int start_bit;
	unsigned int nbits;
	int ret;

	ASSERT(folio_test_locked(folio));
	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, folio->mapping))
		return;

	subpage = folio_get_private(folio);
	start_bit = subpage_calc_start_bit(fs_info, folio, locked, start, len);
	nbits = len >> fs_info->sectorsize_bits;
	spin_lock_irqsave(&subpage->lock, flags);
	/* The target range should not yet be locked. */
	ASSERT(bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits));
	bitmap_set(subpage->bitmaps, start_bit, nbits);
	ret = atomic_add_return(nbits, &subpage->writers);
	ASSERT(ret <= fs_info->sectors_per_page);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

/*
 * Find any subpage writer locked range inside @folio, starting at file offset
 * @search_start.  The caller should ensure the folio is locked.
 *
 * Return true and update @found_start_ret and @found_len_ret to the first
 * writer locked range.
 * Return false if there is no writer locked range.
 */
bool btrfs_subpage_find_writer_locked(const struct btrfs_fs_info *fs_info,
				      struct folio *folio, u64 search_start,
				      u64 *found_start_ret, u32 *found_len_ret)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	const u32 sectors_per_page = fs_info->sectors_per_page;
	const unsigned int len = PAGE_SIZE - offset_in_page(search_start);
	const unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
						locked, search_start, len);
	const unsigned int locked_bitmap_start = sectors_per_page * btrfs_bitmap_nr_locked;
	const unsigned int locked_bitmap_end = locked_bitmap_start + sectors_per_page;
	unsigned long flags;
	int first_zero;
	int first_set;
	bool found = false;

	ASSERT(folio_test_locked(folio));
	spin_lock_irqsave(&subpage->lock, flags);
	first_set = find_next_bit(subpage->bitmaps, locked_bitmap_end, start_bit);
	if (first_set >= locked_bitmap_end)
		goto out;

	found = true;

	*found_start_ret = folio_pos(folio) +
		((first_set - locked_bitmap_start) << fs_info->sectorsize_bits);
	/*
	 * Since @first_set is ensured to be smaller than locked_bitmap_end
	 * here, @found_start_ret should be inside the folio.
	 */
	ASSERT(*found_start_ret < folio_pos(folio) + PAGE_SIZE);

	first_zero = find_next_zero_bit(subpage->bitmaps, locked_bitmap_end, first_set);
	*found_len_ret = (first_zero - first_set) << fs_info->sectorsize_bits;
out:
	spin_unlock_irqrestore(&subpage->lock, flags);
	return found;
}

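/*
 * A minimal usage sketch (a hypothetical caller, assuming the folio is
 * already locked): walk all writer-locked ranges inside one folio.
 *
 *	u64 cur = folio_pos(folio);
 *	u64 found_start;
 *	u32 found_len;
 *
 *	while (btrfs_subpage_find_writer_locked(fs_info, folio, cur,
 *						&found_start, &found_len)) {
 *		// Handle [found_start, found_start + found_len) here.
 *		cur = found_start + found_len;
 *		if (cur >= folio_pos(folio) + PAGE_SIZE)
 *			break;
 *	}
 */
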
#define GET_SUBPAGE_BITMAP(subpage, fs_info, name, dst)			\
{									\
	const int sectors_per_page = fs_info->sectors_per_page;	\
									\
	ASSERT(sectors_per_page < BITS_PER_LONG);			\
	*dst = bitmap_read(subpage->bitmaps,				\
			   sectors_per_page * btrfs_bitmap_nr_##name,	\
			   sectors_per_page);				\
}

void __cold btrfs_subpage_dump_bitmap(const struct btrfs_fs_info *fs_info,
				      struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage;
	const u32 sectors_per_page = fs_info->sectors_per_page;
	unsigned long uptodate_bitmap;
	unsigned long dirty_bitmap;
	unsigned long locked_bitmap;
	unsigned long writeback_bitmap;
	unsigned long ordered_bitmap;
	unsigned long checked_bitmap;
	unsigned long flags;

	ASSERT(folio_test_private(folio) && folio_get_private(folio));
	ASSERT(sectors_per_page > 1);
	subpage = folio_get_private(folio);

	spin_lock_irqsave(&subpage->lock, flags);
	GET_SUBPAGE_BITMAP(subpage, fs_info, uptodate, &uptodate_bitmap);
	GET_SUBPAGE_BITMAP(subpage, fs_info, dirty, &dirty_bitmap);
	GET_SUBPAGE_BITMAP(subpage, fs_info, writeback, &writeback_bitmap);
	GET_SUBPAGE_BITMAP(subpage, fs_info, ordered, &ordered_bitmap);
	GET_SUBPAGE_BITMAP(subpage, fs_info, checked, &checked_bitmap);
	/* Read the locked bitmap into its own variable, not over checked_bitmap. */
	GET_SUBPAGE_BITMAP(subpage, fs_info, locked, &locked_bitmap);
	spin_unlock_irqrestore(&subpage->lock, flags);

	dump_page(folio_page(folio, 0), "btrfs subpage dump");
	btrfs_warn(fs_info,
"start=%llu len=%u page=%llu, bitmaps uptodate=%*pbl dirty=%*pbl locked=%*pbl writeback=%*pbl ordered=%*pbl checked=%*pbl",
		   start, len, folio_pos(folio),
		   sectors_per_page, &uptodate_bitmap,
		   sectors_per_page, &dirty_bitmap,
		   sectors_per_page, &locked_bitmap,
		   sectors_per_page, &writeback_bitmap,
		   sectors_per_page, &ordered_bitmap,
		   sectors_per_page, &checked_bitmap);
}

void btrfs_get_subpage_dirty_bitmap(struct btrfs_fs_info *fs_info,
				    struct folio *folio,
				    unsigned long *ret_bitmap)
{
	struct btrfs_subpage *subpage;
	unsigned long flags;

	ASSERT(folio_test_private(folio) && folio_get_private(folio));
	ASSERT(fs_info->sectors_per_page > 1);
	subpage = folio_get_private(folio);

	spin_lock_irqsave(&subpage->lock, flags);
	GET_SUBPAGE_BITMAP(subpage, fs_info, dirty, ret_bitmap);
	spin_unlock_irqrestore(&subpage->lock, flags);
}