/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_SB_MEMBERS_H
#define _BCACHEFS_SB_MEMBERS_H

#include "darray.h"
#include "bkey_types.h"

extern char * const bch2_member_error_strs[];
static inline struct bch_member *
__bch2_members_v2_get_mut(struct bch_sb_field_members_v2 *mi, unsigned i)
{
	return (void *) mi->_members + (i * le16_to_cpu(mi->member_bytes));
}
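/*
 * Editorial note (assumption about intent): members_v2 entries are indexed
 * with a stride of member_bytes rather than sizeof(struct bch_member),
 * which is presumably what lets struct bch_member grow in newer versions
 * while superblocks written by older versions stay readable; hence the
 * byte-offset arithmetic above.
 */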
int bch2_sb_members_v2_init(struct bch_fs *c);
int bch2_sb_members_cpy_v2_v1(struct bch_sb_handle *disk_sb);
struct bch_member *bch2_members_v2_get_mut(struct bch_sb *sb, int i);
struct bch_member bch2_sb_member_get(struct bch_sb *sb, int i);
static inline bool bch2_dev_is_online(struct bch_dev *ca)
{
	return !percpu_ref_is_zero(&ca->io_ref);
}

static inline bool bch2_dev_is_readable(struct bch_dev *ca)
{
	return bch2_dev_is_online(ca) &&
		ca->mi.state != BCH_MEMBER_STATE_failed;
}

static inline unsigned dev_mask_nr(const struct bch_devs_mask *devs)
{
	return bitmap_weight(devs->d, BCH_SB_MEMBERS_MAX);
}
static inline bool bch2_dev_list_has_dev(struct bch_devs_list devs,
					 unsigned dev)
{
	darray_for_each(devs, i)
		if (*i == dev)
			return true;
	return false;
}

static inline void bch2_dev_list_drop_dev(struct bch_devs_list *devs,
					  unsigned dev)
{
	darray_for_each(*devs, i)
		if (*i == dev) {
			darray_remove_item(devs, i);
			return;
		}
}

static inline void bch2_dev_list_add_dev(struct bch_devs_list *devs,
					 unsigned dev)
{
	if (!bch2_dev_list_has_dev(*devs, dev)) {
		BUG_ON(devs->nr >= ARRAY_SIZE(devs->data));
		devs->data[devs->nr++] = dev;
	}
}

static inline struct bch_devs_list bch2_dev_list_single(unsigned dev)
{
	return (struct bch_devs_list) { .nr = 1, .data[0] = dev };
}
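
/*
 * Usage sketch (hypothetical helper, not part of bcachefs): building a
 * small device list and testing membership. bch2_dev_list_add_dev() BUGs
 * on overflow of the fixed-size array, so callers must stay within
 * ARRAY_SIZE(devs.data).
 */
static inline bool bch2_example_list_has_both(unsigned a, unsigned b)
{
	struct bch_devs_list devs = bch2_dev_list_single(a);

	bch2_dev_list_add_dev(&devs, b);	/* no-op if b == a */
	return bch2_dev_list_has_dev(devs, a) &&
		bch2_dev_list_has_dev(devs, b);
}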
static inline struct bch_dev *__bch2_next_dev_idx(struct bch_fs *c, unsigned idx,
						  const struct bch_devs_mask *mask)
{
	struct bch_dev *ca = NULL;

	while ((idx = mask
		? find_next_bit(mask->d, c->sb.nr_devices, idx)
		: idx) < c->sb.nr_devices &&
	       !(ca = rcu_dereference_check(c->devs[idx],
					    lockdep_is_held(&c->state_lock))))
		idx++;

	return ca;
}

static inline struct bch_dev *__bch2_next_dev(struct bch_fs *c, struct bch_dev *ca,
					      const struct bch_devs_mask *mask)
{
	return __bch2_next_dev_idx(c, ca ? ca->dev_idx + 1 : 0, mask);
}
#define for_each_member_device_rcu(_c, _ca, _mask)			\
	for (struct bch_dev *_ca = NULL;				\
	     (_ca = __bch2_next_dev((_c), _ca, (_mask)));)
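
/*
 * Usage sketch (hypothetical helper): the _rcu iterator takes no device
 * refs, so the caller must hold rcu_read_lock() across the whole walk.
 * A NULL mask visits every slot in c->devs.
 */
static inline unsigned bch2_example_nr_devs_rcu(struct bch_fs *c)
{
	unsigned nr = 0;

	rcu_read_lock();
	for_each_member_device_rcu(c, ca, NULL)
		nr++;
	rcu_read_unlock();
	return nr;
}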
static inline void bch2_dev_get(struct bch_dev *ca)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	BUG_ON(atomic_long_inc_return(&ca->ref) <= 1L);
#else
	percpu_ref_get(&ca->ref);
#endif
}

static inline void __bch2_dev_put(struct bch_dev *ca)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	long r = atomic_long_dec_return(&ca->ref);
	if (r < (long) !ca->dying)
		panic("bch_dev->ref underflow, last put: %pS\n", (void *) ca->last_put);
	ca->last_put = _THIS_IP_;
	if (!r)
		complete(&ca->ref_completion);
#else
	percpu_ref_put(&ca->ref);
#endif
}

static inline void bch2_dev_put(struct bch_dev *ca)
{
	if (ca)
		__bch2_dev_put(ca);
}
static inline struct bch_dev *bch2_get_next_dev(struct bch_fs *c, struct bch_dev *ca)
{
	rcu_read_lock();
	bch2_dev_put(ca);
	if ((ca = __bch2_next_dev(c, ca, NULL)))
		bch2_dev_get(ca);
	rcu_read_unlock();

	return ca;
}

/*
 * If you break early, you must drop your ref on the current device:
 */
#define __for_each_member_device(_c, _ca)				\
	for (; (_ca = bch2_get_next_dev(_c, _ca));)

#define for_each_member_device(_c, _ca)					\
	for (struct bch_dev *_ca = NULL;				\
	     (_ca = bch2_get_next_dev(_c, _ca));)
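
/*
 * Usage sketch (hypothetical helper) of the early-break rule above:
 * for_each_member_device() holds a ref on the current device, so leaving
 * the loop early transfers that ref to the caller, who must drop it with
 * bch2_dev_put().
 */
static inline struct bch_dev *bch2_example_find_dev(struct bch_fs *c,
						    unsigned dev_idx)
{
	for_each_member_device(c, ca)
		if (ca->dev_idx == dev_idx)
			return ca;	/* caller must bch2_dev_put() */
	return NULL;
}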
static inline struct bch_dev *bch2_get_next_online_dev(struct bch_fs *c,
						       struct bch_dev *ca,
						       unsigned state_mask)
{
	rcu_read_lock();
	if (ca)
		percpu_ref_put(&ca->io_ref);

	while ((ca = __bch2_next_dev(c, ca, NULL)) &&
	       (!((1 << ca->mi.state) & state_mask) ||
		!percpu_ref_tryget(&ca->io_ref)))
		;
	rcu_read_unlock();

	return ca;
}

#define __for_each_online_member(_c, _ca, state_mask)			\
	for (struct bch_dev *_ca = NULL;				\
	     (_ca = bch2_get_next_online_dev(_c, _ca, state_mask));)
#define for_each_online_member(c, ca)					\
	__for_each_online_member(c, ca, ~0)

#define for_each_rw_member(c, ca)					\
	__for_each_online_member(c, ca, BIT(BCH_MEMBER_STATE_rw))

#define for_each_readable_member(c, ca)					\
	__for_each_online_member(c, ca,					\
		BIT(BCH_MEMBER_STATE_rw)|BIT(BCH_MEMBER_STATE_ro))
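
/*
 * Usage sketch (hypothetical helper): for_each_rw_member() visits only
 * online rw devices, taking an io_ref on each; the iterator drops the ref
 * as it advances, so nothing needs cleaning up when the loop completes.
 */
static inline unsigned bch2_example_nr_rw_devs(struct bch_fs *c)
{
	unsigned nr = 0;

	for_each_rw_member(c, ca)
		nr++;
	return nr;
}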
static inline bool bch2_dev_exists(const struct bch_fs *c, unsigned dev)
{
	return dev < c->sb.nr_devices && c->devs[dev];
}

static inline bool bucket_valid(const struct bch_dev *ca, u64 b)
{
	return b - ca->mi.first_bucket < ca->mi.nbuckets_minus_first;
}
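
/*
 * Editorial note: the single unsigned comparison above folds two range
 * checks into one: because the subtraction wraps when b < first_bucket,
 * it is equivalent to first_bucket <= b && b < nbuckets. E.g. with
 * first_bucket == 16 and nbuckets == 1024, exactly buckets 16..1023 are
 * valid.
 */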
static inline struct bch_dev *bch2_dev_have_ref(const struct bch_fs *c, unsigned dev)
{
	EBUG_ON(!bch2_dev_exists(c, dev));

	return rcu_dereference_check(c->devs[dev], 1);
}

static inline struct bch_dev *bch2_dev_locked(struct bch_fs *c, unsigned dev)
{
	EBUG_ON(!bch2_dev_exists(c, dev));

	return rcu_dereference_protected(c->devs[dev],
					 lockdep_is_held(&c->sb_lock) ||
					 lockdep_is_held(&c->state_lock));
}
static inline struct bch_dev *bch2_dev_rcu_noerror(struct bch_fs *c, unsigned dev)
{
	return c && dev < c->sb.nr_devices
		? rcu_dereference(c->devs[dev])
		: NULL;
}

void bch2_dev_missing(struct bch_fs *, unsigned);

static inline struct bch_dev *bch2_dev_rcu(struct bch_fs *c, unsigned dev)
{
	struct bch_dev *ca = bch2_dev_rcu_noerror(c, dev);
	if (unlikely(!ca))
		bch2_dev_missing(c, dev);
	return ca;
}
static inline struct bch_dev *bch2_dev_tryget_noerror(struct bch_fs *c, unsigned dev)
{
	rcu_read_lock();
	struct bch_dev *ca = bch2_dev_rcu_noerror(c, dev);
	if (ca)
		bch2_dev_get(ca);
	rcu_read_unlock();
	return ca;
}

static inline struct bch_dev *bch2_dev_tryget(struct bch_fs *c, unsigned dev)
{
	struct bch_dev *ca = bch2_dev_tryget_noerror(c, dev);
	if (unlikely(!ca))
		bch2_dev_missing(c, dev);
	return ca;
}
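
/*
 * Usage sketch (hypothetical helper): bch2_dev_tryget() returns the device
 * with a ref held, or NULL after reporting via bch2_dev_missing(); every
 * successful lookup must be paired with bch2_dev_put().
 */
static inline bool bch2_example_dev_online(struct bch_fs *c, unsigned dev)
{
	struct bch_dev *ca = bch2_dev_tryget(c, dev);
	bool ret = ca && bch2_dev_is_online(ca);

	bch2_dev_put(ca);	/* NULL-safe */
	return ret;
}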
static inline struct bch_dev *bch2_dev_bucket_tryget_noerror(struct bch_fs *c, struct bpos bucket)
{
	struct bch_dev *ca = bch2_dev_tryget_noerror(c, bucket.inode);
	if (ca && !bucket_valid(ca, bucket.offset)) {
		bch2_dev_put(ca);
		ca = NULL;
	}
	return ca;
}

void bch2_dev_bucket_missing(struct bch_fs *, struct bpos);

static inline struct bch_dev *bch2_dev_bucket_tryget(struct bch_fs *c, struct bpos bucket)
{
	struct bch_dev *ca = bch2_dev_bucket_tryget_noerror(c, bucket);
	if (unlikely(!ca))
		bch2_dev_bucket_missing(c, bucket);
	return ca;
}
static inline struct bch_dev *bch2_dev_iterate_noerror(struct bch_fs *c, struct bch_dev *ca, unsigned dev_idx)
{
	if (ca && ca->dev_idx == dev_idx)
		return ca;
	bch2_dev_put(ca);
	return bch2_dev_tryget_noerror(c, dev_idx);
}

static inline struct bch_dev *bch2_dev_iterate(struct bch_fs *c, struct bch_dev *ca, unsigned dev_idx)
{
	if (ca && ca->dev_idx == dev_idx)
		return ca;
	bch2_dev_put(ca);
	return bch2_dev_tryget(c, dev_idx);
}
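
/*
 * Editorial note: the iterate variants are for walking a sequence of
 * dev_idx values (e.g. the pointers in one extent after another) while
 * reusing the ref from the previous step whenever consecutive lookups hit
 * the same device.
 */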
static inline struct bch_dev *bch2_dev_get_ioref(struct bch_fs *c, unsigned dev, int rw)
{
	rcu_read_lock();
	struct bch_dev *ca = bch2_dev_rcu(c, dev);
	if (ca && !percpu_ref_tryget(&ca->io_ref))
		ca = NULL;
	rcu_read_unlock();

	if (ca &&
	    (ca->mi.state == BCH_MEMBER_STATE_rw ||
	     (ca->mi.state == BCH_MEMBER_STATE_ro && rw == READ)))
		return ca;

	if (ca)
		percpu_ref_put(&ca->io_ref);
	return NULL;
}
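
/*
 * Usage sketch (hypothetical helper): bch2_dev_get_ioref() returns the
 * device with an io_ref held only if it is online and its member state
 * permits the requested rw mode (READ/WRITE in the usual kernel
 * convention); the caller drops the io_ref once the IO is done.
 */
static inline bool bch2_example_dev_writeable(struct bch_fs *c, unsigned dev)
{
	struct bch_dev *ca = bch2_dev_get_ioref(c, dev, WRITE);

	if (!ca)
		return false;
	percpu_ref_put(&ca->io_ref);
	return true;
}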
/* XXX kill, move to struct bch_fs */
static inline struct bch_devs_mask bch2_online_devs(struct bch_fs *c)
{
	struct bch_devs_mask devs;

	memset(&devs, 0, sizeof(devs));
	for_each_online_member(c, ca)
		__set_bit(ca->dev_idx, devs.d);
	return devs;
}
extern const struct bch_sb_field_ops bch_sb_field_ops_members_v1;
extern const struct bch_sb_field_ops bch_sb_field_ops_members_v2;

static inline bool bch2_member_alive(struct bch_member *m)
{
	return !bch2_is_zero(&m->uuid, sizeof(m->uuid));
}

static inline bool bch2_member_exists(struct bch_sb *sb, unsigned dev)
{
	if (dev < sb->nr_devices) {
		struct bch_member m = bch2_sb_member_get(sb, dev);
		return bch2_member_alive(&m);
	}
	return false;
}

unsigned bch2_sb_nr_devices(const struct bch_sb *);
static inline struct bch_member_cpu bch2_mi_to_cpu(struct bch_member *mi)
{
	return (struct bch_member_cpu) {
		.nbuckets	= le64_to_cpu(mi->nbuckets),
		.nbuckets_minus_first = le64_to_cpu(mi->nbuckets) -
			le16_to_cpu(mi->first_bucket),
		.first_bucket	= le16_to_cpu(mi->first_bucket),
		.bucket_size	= le16_to_cpu(mi->bucket_size),
		.group		= BCH_MEMBER_GROUP(mi),
		.state		= BCH_MEMBER_STATE(mi),
		.discard	= BCH_MEMBER_DISCARD(mi),
		.data_allowed	= BCH_MEMBER_DATA_ALLOWED(mi),
		.durability	= BCH_MEMBER_DURABILITY(mi)
			? BCH_MEMBER_DURABILITY(mi) - 1
			: 1,
		.freespace_initialized = BCH_MEMBER_FREESPACE_INITIALIZED(mi),
		.valid		= bch2_member_alive(mi),
		.btree_bitmap_shift	= mi->btree_bitmap_shift,
		.btree_allocated_bitmap = le64_to_cpu(mi->btree_allocated_bitmap),
	};
}
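
/*
 * Editorial note: BCH_MEMBER_DURABILITY() is stored biased by one so that
 * zero can mean "unset": a stored 0 decodes to the default durability of
 * 1, and a stored n > 0 decodes to n - 1.
 */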
void bch2_sb_members_from_cpu(struct bch_fs *);

void bch2_dev_io_errors_to_text(struct printbuf *, struct bch_dev *);
void bch2_dev_errors_reset(struct bch_dev *);
static inline bool bch2_dev_btree_bitmap_marked_sectors(struct bch_dev *ca, u64 start, unsigned sectors)
{
	u64 end = start + sectors;

	if (end > 64ULL << ca->mi.btree_bitmap_shift)
		return false;

	for (unsigned bit = start >> ca->mi.btree_bitmap_shift;
	     (u64) bit << ca->mi.btree_bitmap_shift < end;
	     bit++)
		if (!(ca->mi.btree_allocated_bitmap & BIT_ULL(bit)))
			return false;
	return true;
}
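
/*
 * Editorial worked example: the 64-bit bitmap divides the device into 64
 * regions of 2^btree_bitmap_shift sectors. With btree_bitmap_shift == 10,
 * start == 3000 and sectors == 200, the range 3000..3200 touches bits 2
 * and 3 (sectors 2048..4095), so both must be set for the range to count
 * as marked.
 */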
bool bch2_dev_btree_bitmap_marked(struct bch_fs *, struct bkey_s_c);
void bch2_dev_btree_bitmap_mark(struct bch_fs *, struct bkey_s_c);

int bch2_sb_member_alloc(struct bch_fs *);

#endif /* _BCACHEFS_SB_MEMBERS_H */