/*
 * Linux 6.13-rc4
 * [linux.git] / fs / bcachefs / fs-io-pagecache.h
 * blob fad911cf50680186c763e22e4bff1bd6646399c0
 */
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _BCACHEFS_FS_IO_PAGECACHE_H
3 #define _BCACHEFS_FS_IO_PAGECACHE_H
5 #include <linux/pagemap.h>
7 typedef DARRAY(struct folio *) folios;
9 int bch2_filemap_get_contig_folios_d(struct address_space *, loff_t,
10 u64, fgf_t, gfp_t, folios *);
11 int bch2_write_invalidate_inode_pages_range(struct address_space *, loff_t, loff_t);
/*
 * Use u64 for the end pos and sector helpers because if the folio covers the
 * max supported range of the mapping, the start offset of the next folio
 * overflows loff_t. This breaks much of the range based processing in the
 * buffered write path.
 */
19 static inline u64 folio_end_pos(struct folio *folio)
21 return folio_pos(folio) + folio_size(folio);
24 static inline size_t folio_sectors(struct folio *folio)
26 return PAGE_SECTORS << folio_order(folio);
29 static inline loff_t folio_sector(struct folio *folio)
31 return folio_pos(folio) >> 9;
34 static inline u64 folio_end_sector(struct folio *folio)
36 return folio_end_pos(folio) >> 9;
/*
 * X-macro list of per-sector pagecache states; each x(n) entry expands to
 * a SECTOR_##n constant in enum bch_folio_sector_state below.
 */
#define BCH_FOLIO_SECTOR_STATE()	\
	x(unallocated)			\
	x(reserved)			\
	x(dirty)			\
	x(dirty_reserved)		\
	x(allocated)
46 enum bch_folio_sector_state {
47 #define x(n) SECTOR_##n,
48 BCH_FOLIO_SECTOR_STATE()
49 #undef x
52 struct bch_folio_sector {
53 /* Uncompressed, fully allocated replicas (or on disk reservation): */
54 u8 nr_replicas:4,
55 /* Owns PAGE_SECTORS * replicas_reserved sized in memory reservation: */
56 replicas_reserved:4;
57 u8 state;
60 struct bch_folio {
61 spinlock_t lock;
62 atomic_t write_count;
64 * Is the sector state up to date with the btree?
65 * (Not the data itself)
67 bool uptodate;
68 struct bch_folio_sector s[];
71 /* Helper for when we need to add debug instrumentation: */
72 static inline void bch2_folio_sector_set(struct folio *folio,
73 struct bch_folio *s,
74 unsigned i, unsigned n)
76 s->s[i].state = n;
79 /* file offset (to folio offset) to bch_folio_sector index */
80 static inline int folio_pos_to_s(struct folio *folio, loff_t pos)
82 u64 f_offset = pos - folio_pos(folio);
84 BUG_ON(pos < folio_pos(folio) || pos >= folio_end_pos(folio));
85 return f_offset >> SECTOR_SHIFT;
/* for newly allocated folios: */
static inline void __bch2_folio_release(struct folio *folio)
{
	void *s = folio_detach_private(folio);

	/* kfree(NULL) is a no-op, so a folio with no private state is fine */
	kfree(s);
}
static inline void bch2_folio_release(struct folio *folio)
{
	/* caller must hold the folio lock */
	EBUG_ON(!folio_test_locked(folio));

	__bch2_folio_release(folio);
}
/* bch_folio state attached to @folio; lock-check-free variant */
static inline struct bch_folio *__bch2_folio(struct folio *folio)
{
	return (struct bch_folio *) folio_get_private(folio);
}
static inline struct bch_folio *bch2_folio(struct folio *folio)
{
	/* caller must hold the folio lock */
	EBUG_ON(!folio_test_locked(folio));

	return __bch2_folio(folio);
}
112 struct bch_folio *__bch2_folio_create(struct folio *, gfp_t);
113 struct bch_folio *bch2_folio_create(struct folio *, gfp_t);
115 struct bch2_folio_reservation {
116 struct disk_reservation disk;
117 struct quota_res quota;
120 static inline unsigned inode_nr_replicas(struct bch_fs *c, struct bch_inode_info *inode)
122 /* XXX: this should not be open coded */
123 return inode->ei_inode.bi_data_replicas
124 ? inode->ei_inode.bi_data_replicas - 1
125 : c->opts.data_replicas;
128 static inline void bch2_folio_reservation_init(struct bch_fs *c,
129 struct bch_inode_info *inode,
130 struct bch2_folio_reservation *res)
132 memset(res, 0, sizeof(*res));
134 res->disk.nr_replicas = inode_nr_replicas(c, inode);
137 int bch2_folio_set(struct bch_fs *, subvol_inum, struct folio **, unsigned);
138 void bch2_bio_page_state_set(struct bio *, struct bkey_s_c);
140 void bch2_mark_pagecache_unallocated(struct bch_inode_info *, u64, u64);
141 int bch2_mark_pagecache_reserved(struct bch_inode_info *, u64 *, u64, bool);
143 int bch2_get_folio_disk_reservation(struct bch_fs *,
144 struct bch_inode_info *,
145 struct folio *, bool);
147 void bch2_folio_reservation_put(struct bch_fs *,
148 struct bch_inode_info *,
149 struct bch2_folio_reservation *);
150 int bch2_folio_reservation_get(struct bch_fs *,
151 struct bch_inode_info *,
152 struct folio *,
153 struct bch2_folio_reservation *,
154 size_t, size_t);
155 ssize_t bch2_folio_reservation_get_partial(struct bch_fs *,
156 struct bch_inode_info *,
157 struct folio *,
158 struct bch2_folio_reservation *,
159 size_t, size_t);
161 void bch2_set_folio_dirty(struct bch_fs *,
162 struct bch_inode_info *,
163 struct folio *,
164 struct bch2_folio_reservation *,
165 unsigned, unsigned);
167 vm_fault_t bch2_page_fault(struct vm_fault *);
168 vm_fault_t bch2_page_mkwrite(struct vm_fault *);
169 void bch2_invalidate_folio(struct folio *, size_t, size_t);
170 bool bch2_release_folio(struct folio *, gfp_t);
172 loff_t bch2_seek_pagecache_data(struct inode *, loff_t, loff_t, unsigned, bool);
173 loff_t bch2_seek_pagecache_hole(struct inode *, loff_t, loff_t, unsigned, bool);
174 int bch2_clamp_data_hole(struct inode *, u64 *, u64 *, unsigned, bool);
176 #endif /* _BCACHEFS_FS_IO_PAGECACHE_H */