/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_IO_H
#define _BCACHEFS_BTREE_IO_H

#include "bkey_methods.h"
#include "bset.h"
#include "btree_locking.h"
#include "checksum.h"
#include "extents.h"
#include "io_write_types.h"

struct btree_node_read_all;
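
/*
 * Dirty-node accounting: these helpers keep btree_cache.nr_dirty in sync with
 * the per-node BTREE_NODE_dirty flag; the test_and_{set,clear}_bit() pattern
 * ensures the counter only moves on an actual flag transition.
 */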
static inline void set_btree_node_dirty_acct(struct bch_fs *c, struct btree *b)
{
	if (!test_and_set_bit(BTREE_NODE_dirty, &b->flags))
		atomic_long_inc(&c->btree_cache.nr_dirty);
}

static inline void clear_btree_node_dirty_acct(struct bch_fs *c, struct btree *b)
{
	if (test_and_clear_bit(BTREE_NODE_dirty, &b->flags))
		atomic_long_dec(&c->btree_cache.nr_dirty);
}
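
/*
 * Only KEY_TYPE_btree_ptr_v2 keys carry a sectors_written field (how many
 * sectors of the node have been written so far); for older btree_ptr keys it
 * isn't tracked, so this returns 0.
 */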
static inline unsigned btree_ptr_sectors_written(struct bkey_s_c k)
{
	return k.k->type == KEY_TYPE_btree_ptr_v2
		? le16_to_cpu(bkey_s_c_to_btree_ptr_v2(k).v->sectors_written)
		: 0;
}
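
/*
 * Per-I/O state for btree node reads and writes; the embedded work_struct is
 * used to defer completion handling to process context, and 'pick' records
 * which extent pointer a read was issued against.
 */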
struct btree_read_bio {
	struct btree_node_read_all	*ra;
	unsigned			have_ioref:1;
	struct extent_ptr_decoded	pick;
	struct work_struct		work;
	struct bio			bio;
};

struct btree_write_bio {
	struct work_struct	work;
	__BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);
	unsigned		sector_offset;
	struct bch_write_bio	wbio;
};

void bch2_btree_node_io_unlock(struct btree *);
void bch2_btree_node_io_lock(struct btree *);
void __bch2_btree_node_wait_on_read(struct btree *);
void __bch2_btree_node_wait_on_write(struct btree *);
void bch2_btree_node_wait_on_read(struct btree *);
void bch2_btree_node_wait_on_write(struct btree *);

enum compact_mode {
	COMPACT_LAZY,
	COMPACT_ALL,
};

bool bch2_compact_whiteouts(struct bch_fs *, struct btree *,
			    enum compact_mode);
static inline bool should_compact_bset_lazy(struct btree *b,
					    struct bset_tree *t)
{
	unsigned total_u64s = bset_u64s(t);
	unsigned dead_u64s = bset_dead_u64s(b, t);

	return dead_u64s > 64 && dead_u64s * 3 > total_u64s;
}

static inline bool bch2_maybe_compact_whiteouts(struct bch_fs *c, struct btree *b)
{
	for_each_bset(b, t)
		if (should_compact_bset_lazy(b, t))
			return bch2_compact_whiteouts(c, b, COMPACT_LAZY);

	return false;
}
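
/*
 * Encryption nonce for a bset: built from the bset's offset within the node
 * plus its seq and journal_seq, XORed with BCH_NONCE_BTREE, so different
 * bsets (and different nodes) get distinct nonces.
 */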
static inline struct nonce btree_nonce(struct bset *i, unsigned offset)
{
	return (struct nonce) {{
		[0] = cpu_to_le32(offset),
		[1] = ((__le32 *) &i->seq)[0],
		[2] = ((__le32 *) &i->seq)[1],
		[3] = ((__le32 *) &i->journal_seq)[0]^BCH_NONCE_BTREE,
	}};
}
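
/*
 * Encrypt (or decrypt - the stream cipher is symmetric) a bset in place.  For
 * the first bset (offset == 0) the btree node header fields between ->flags
 * and ->keys are encrypted as well, and the nonce is then advanced by a whole
 * number of ChaCha blocks before encrypting the keys themselves.
 */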
static inline int bset_encrypt(struct bch_fs *c, struct bset *i, unsigned offset)
{
	struct nonce nonce = btree_nonce(i, offset);
	int ret;

	if (!offset) {
		struct btree_node *bn = container_of(i, struct btree_node, keys);
		unsigned bytes = (void *) &bn->keys - (void *) &bn->flags;

		ret = bch2_encrypt(c, BSET_CSUM_TYPE(i), nonce,
				   &bn->flags, bytes);
		if (ret)
			return ret;

		nonce = nonce_add(nonce, round_up(bytes, CHACHA_BLOCK_SIZE));
	}

	return bch2_encrypt(c, BSET_CSUM_TYPE(i), nonce, i->_data,
			    vstruct_end(i) - (void *) i->_data);
}

void bch2_btree_sort_into(struct bch_fs *, struct btree *, struct btree *);

void bch2_btree_node_drop_keys_outside_node(struct btree *);

void bch2_btree_build_aux_trees(struct btree *);
void bch2_btree_init_next(struct btree_trans *, struct btree *);

int bch2_btree_node_read_done(struct bch_fs *, struct bch_dev *,
			      struct btree *, bool, bool *);
void bch2_btree_node_read(struct btree_trans *, struct btree *, bool);
int bch2_btree_root_read(struct bch_fs *, enum btree_id,
			 const struct bkey_i *, unsigned);

bool bch2_btree_post_write_cleanup(struct bch_fs *, struct btree *);
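
/*
 * Write flags start above BTREE_WRITE_TYPE_BITS so they can be or'd into the
 * same flags word as the write type passed to bch2_btree_node_write().
 */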
enum btree_write_flags {
	__BTREE_WRITE_ONLY_IF_NEED = BTREE_WRITE_TYPE_BITS,
	__BTREE_WRITE_ALREADY_STARTED,
};
#define BTREE_WRITE_ONLY_IF_NEED	BIT(__BTREE_WRITE_ONLY_IF_NEED)
#define BTREE_WRITE_ALREADY_STARTED	BIT(__BTREE_WRITE_ALREADY_STARTED)

void __bch2_btree_node_write(struct bch_fs *, struct btree *, unsigned);
void bch2_btree_node_write(struct bch_fs *, struct btree *,
			   enum six_lock_type, unsigned);

static inline void btree_node_write_if_need(struct bch_fs *c, struct btree *b,
					    enum six_lock_type lock_held)
{
	bch2_btree_node_write(c, b, lock_held, BTREE_WRITE_ONLY_IF_NEED);
}

bool bch2_btree_flush_all_reads(struct bch_fs *);
bool bch2_btree_flush_all_writes(struct bch_fs *);
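
/*
 * The compat helpers below translate btree node keys, positions and formats
 * between the current layout and the layouts used before
 * bcachefs_metadata_version_inode_btree_change and _snapshot; 'write' gives
 * the direction (nonzero when converting to the older on-disk layout, zero
 * when converting what was just read back to the current one).
 */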
static inline void compat_bformat(unsigned level, enum btree_id btree_id,
				  unsigned version, unsigned big_endian,
				  int write, struct bkey_format *f)
{
	if (version < bcachefs_metadata_version_inode_btree_change &&
	    btree_id == BTREE_ID_inodes) {
		swap(f->bits_per_field[BKEY_FIELD_INODE],
		     f->bits_per_field[BKEY_FIELD_OFFSET]);
		swap(f->field_offset[BKEY_FIELD_INODE],
		     f->field_offset[BKEY_FIELD_OFFSET]);
	}

	if (version < bcachefs_metadata_version_snapshot &&
	    (level || btree_type_has_snapshots(btree_id))) {
		u64 max_packed =
			~(~0ULL << f->bits_per_field[BKEY_FIELD_SNAPSHOT]);

		f->field_offset[BKEY_FIELD_SNAPSHOT] = write
			? 0
			: cpu_to_le64(U32_MAX - max_packed);
	}
}

static inline void compat_bpos(unsigned level, enum btree_id btree_id,
			       unsigned version, unsigned big_endian,
			       int write, struct bpos *p)
{
	if (big_endian != CPU_BIG_ENDIAN)
		bch2_bpos_swab(p);

	if (version < bcachefs_metadata_version_inode_btree_change &&
	    btree_id == BTREE_ID_inodes)
		swap(p->inode, p->offset);
}

static inline void compat_btree_node(unsigned level, enum btree_id btree_id,
				     unsigned version, unsigned big_endian,
				     int write,
				     struct btree_node *bn)
{
	if (version < bcachefs_metadata_version_inode_btree_change &&
	    btree_id_is_extents(btree_id) &&
	    !bpos_eq(bn->min_key, POS_MIN) &&
	    write)
		bn->min_key = bpos_nosnap_predecessor(bn->min_key);

	if (version < bcachefs_metadata_version_snapshot &&
	    write)
		bn->max_key.snapshot = 0;

	compat_bpos(level, btree_id, version, big_endian, write, &bn->min_key);
	compat_bpos(level, btree_id, version, big_endian, write, &bn->max_key);

	if (version < bcachefs_metadata_version_snapshot &&
	    !write)
		bn->max_key.snapshot = U32_MAX;

	if (version < bcachefs_metadata_version_inode_btree_change &&
	    btree_id_is_extents(btree_id) &&
	    !bpos_eq(bn->min_key, POS_MIN) &&
	    !write)
		bn->min_key = bpos_nosnap_successor(bn->min_key);
}

void bch2_btree_write_stats_to_text(struct printbuf *, struct bch_fs *);

#endif /* _BCACHEFS_BTREE_IO_H */