/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Code for manipulating bucket marks for garbage collection.
 *
 * Copyright 2014 Datera, Inc.
 */

#ifndef _BUCKETS_H
#define _BUCKETS_H

#include "buckets_types.h"
#include "extents.h"
#include "sb-members.h"

static inline u64 sector_to_bucket(const struct bch_dev *ca, sector_t s)
{
	return div_u64(s, ca->mi.bucket_size);
}

static inline sector_t bucket_to_sector(const struct bch_dev *ca, size_t b)
{
	return ((sector_t) b) * ca->mi.bucket_size;
}

static inline sector_t bucket_remainder(const struct bch_dev *ca, sector_t s)
{
	u32 remainder;

	div_u64_rem(s, ca->mi.bucket_size, &remainder);
	return remainder;
}

static inline u64 sector_to_bucket_and_offset(const struct bch_dev *ca, sector_t s,
					      u32 *offset)
{
	return div_u64_rem(s, ca->mi.bucket_size, offset);
}

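/*
 * Worked example (illustrative; assumes ca->mi.bucket_size == 1024 sectors):
 * sector 3000 maps to bucket 3000 / 1024 == 2 with offset 3000 % 1024 == 952,
 * and bucket_to_sector(ca, 2) == 2048, the first sector of that bucket.
 */
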
#define for_each_bucket(_b, _buckets)				\
	for (_b = (_buckets)->b + (_buckets)->first_bucket;	\
	     _b < (_buckets)->b + (_buckets)->nbuckets; _b++)

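/*
 * A minimal usage sketch (hypothetical locals; the macro walks the array
 * between ->first_bucket and ->nbuckets):
 *
 *	struct bucket *b;
 *
 *	for_each_bucket(b, buckets)
 *		do_something(b);
 */
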
/*
 * Ugly hack alert:
 *
 * We need to cram a spinlock in a single byte, because that's what we have left
 * in struct bucket, and we care about the size of these - during fsck, we need
 * in memory state for every single bucket on every device.
 *
 * We used to do
 *	while (xchg(&b->lock, 1))
 *		cpu_relax();
 * but, it turns out not all architectures support xchg on a single byte.
 *
 * So now we use bit_spin_lock(), with fun games since we can't burn a whole
 * ulong for this - we just need to make sure the lock bit always ends up in the
 * same byte.
 */

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define BUCKET_LOCK_BITNR	0
#else
#define BUCKET_LOCK_BITNR	(BITS_PER_LONG - 1)
#endif

union ulong_byte_assert {
	ulong	ulong;
	u8	byte;
};

static inline void bucket_unlock(struct bucket *b)
{
	BUILD_BUG_ON(!((union ulong_byte_assert) { .ulong = 1UL << BUCKET_LOCK_BITNR }).byte);

	clear_bit_unlock(BUCKET_LOCK_BITNR, (void *) &b->lock);
	wake_up_bit((void *) &b->lock, BUCKET_LOCK_BITNR);
}

static inline void bucket_lock(struct bucket *b)
{
	wait_on_bit_lock((void *) &b->lock, BUCKET_LOCK_BITNR,
			 TASK_UNINTERRUPTIBLE);
}

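/*
 * A minimal sketch of the intended pairing (hypothetical field update):
 *
 *	bucket_lock(b);
 *	... read-modify-write fields of *b ...
 *	bucket_unlock(b);
 *
 * bucket_unlock() also wakes any waiters sleeping in bucket_lock().
 */
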
static inline struct bucket *gc_bucket(struct bch_dev *ca, size_t b)
{
	return genradix_ptr(&ca->buckets_gc, b);
}

static inline struct bucket_gens *bucket_gens(struct bch_dev *ca)
{
	return rcu_dereference_check(ca->bucket_gens,
				     percpu_rwsem_is_held(&ca->fs->mark_lock) ||
				     lockdep_is_held(&ca->fs->state_lock) ||
				     lockdep_is_held(&ca->bucket_lock));
}

static inline u8 *bucket_gen(struct bch_dev *ca, size_t b)
{
	struct bucket_gens *gens = bucket_gens(ca);

	if (b - gens->first_bucket >= gens->nbuckets_minus_first)
		return NULL;
	return gens->b + b;
}

static inline int bucket_gen_get_rcu(struct bch_dev *ca, size_t b)
{
	u8 *gen = bucket_gen(ca, b);
	return gen ? *gen : -1;
}

static inline int bucket_gen_get(struct bch_dev *ca, size_t b)
{
	rcu_read_lock();
	int ret = bucket_gen_get_rcu(ca, b);
	rcu_read_unlock();
	return ret;
}

static inline size_t PTR_BUCKET_NR(const struct bch_dev *ca,
				   const struct bch_extent_ptr *ptr)
{
	return sector_to_bucket(ca, ptr->offset);
}

static inline struct bpos PTR_BUCKET_POS(const struct bch_dev *ca,
					 const struct bch_extent_ptr *ptr)
{
	return POS(ptr->dev, PTR_BUCKET_NR(ca, ptr));
}

static inline struct bpos PTR_BUCKET_POS_OFFSET(const struct bch_dev *ca,
						const struct bch_extent_ptr *ptr,
						u32 *bucket_offset)
{
	return POS(ptr->dev, sector_to_bucket_and_offset(ca, ptr->offset, bucket_offset));
}

static inline struct bucket *PTR_GC_BUCKET(struct bch_dev *ca,
					   const struct bch_extent_ptr *ptr)
{
	return gc_bucket(ca, PTR_BUCKET_NR(ca, ptr));
}

static inline enum bch_data_type ptr_data_type(const struct bkey *k,
					       const struct bch_extent_ptr *ptr)
{
	if (bkey_is_btree_ptr(k))
		return BCH_DATA_btree;

	return ptr->cached ? BCH_DATA_cached : BCH_DATA_user;
}

static inline s64 ptr_disk_sectors(s64 sectors, struct extent_ptr_decoded p)
{
	EBUG_ON(sectors < 0);

	return crc_is_compressed(p.crc)
		? DIV_ROUND_UP_ULL(sectors * p.crc.compressed_size,
				   p.crc.uncompressed_size)
		: sectors;
}

static inline int gen_cmp(u8 a, u8 b)
{
	return (s8) (a - b);
}

static inline int gen_after(u8 a, u8 b)
{
	int r = gen_cmp(a, b);

	return r > 0 ? r : 0;
}

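/*
 * Worked example: generations are 8 bits and wrap, so comparison is done via
 * the signed difference - gen_cmp(1, 255) == (s8) (1 - 255) == 2, i.e. gen 1
 * is two generations newer than gen 255, and gen_after(1, 255) == 2.
 */
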
static inline int dev_ptr_stale_rcu(struct bch_dev *ca, const struct bch_extent_ptr *ptr)
{
	int gen = bucket_gen_get_rcu(ca, PTR_BUCKET_NR(ca, ptr));
	return gen < 0 ? gen : gen_after(gen, ptr->gen);
}

/**
 * dev_ptr_stale() - check if a pointer points into a bucket that has been
 * invalidated.
 */
static inline int dev_ptr_stale(struct bch_dev *ca, const struct bch_extent_ptr *ptr)
{
	rcu_read_lock();
	int ret = dev_ptr_stale_rcu(ca, ptr);
	rcu_read_unlock();
	return ret;
}

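/*
 * A minimal sketch (hypothetical caller): a positive return means the
 * bucket's generation has advanced past the pointer's, i.e. the bucket was
 * reused and the pointed-to data may be gone; a negative return means the
 * bucket number was out of range.
 *
 *	if (dev_ptr_stale(ca, ptr))
 *		... skip or self-heal this pointer ...
 */
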
/* Device usage: */

void bch2_dev_usage_read_fast(struct bch_dev *, struct bch_dev_usage *);
static inline struct bch_dev_usage bch2_dev_usage_read(struct bch_dev *ca)
{
	struct bch_dev_usage ret;

	bch2_dev_usage_read_fast(ca, &ret);
	return ret;
}

void bch2_dev_usage_to_text(struct printbuf *, struct bch_dev *, struct bch_dev_usage *);

static inline u64 bch2_dev_buckets_reserved(struct bch_dev *ca, enum bch_watermark watermark)
{
	u64 reserved = 0;

	switch (watermark) {
	case BCH_WATERMARK_NR:
		BUG();
	case BCH_WATERMARK_stripe:
		reserved += ca->mi.nbuckets >> 6;
		fallthrough;
	case BCH_WATERMARK_normal:
		reserved += ca->mi.nbuckets >> 6;
		fallthrough;
	case BCH_WATERMARK_copygc:
		reserved += ca->nr_btree_reserve;
		fallthrough;
	case BCH_WATERMARK_btree:
		reserved += ca->nr_btree_reserve;
		fallthrough;
	case BCH_WATERMARK_btree_copygc:
	case BCH_WATERMARK_reclaim:
	case BCH_WATERMARK_interior_updates:
		break;
	}

	return reserved;
}

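/*
 * Worked example: the cases above fall through, so reserves accumulate -
 * BCH_WATERMARK_stripe reserves 2 * (nbuckets >> 6) + 2 * nr_btree_reserve,
 * BCH_WATERMARK_btree reserves just nr_btree_reserve, and the reclaim
 * watermarks reserve nothing.
 */
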
static inline u64 dev_buckets_free(struct bch_dev *ca,
				   struct bch_dev_usage usage,
				   enum bch_watermark watermark)
{
	return max_t(s64, 0,
		     usage.d[BCH_DATA_free].buckets -
		     ca->nr_open_buckets -
		     bch2_dev_buckets_reserved(ca, watermark));
}

static inline u64 __dev_buckets_available(struct bch_dev *ca,
					  struct bch_dev_usage usage,
					  enum bch_watermark watermark)
{
	return max_t(s64, 0,
		       usage.d[BCH_DATA_free].buckets
		     + usage.d[BCH_DATA_cached].buckets
		     + usage.d[BCH_DATA_need_gc_gens].buckets
		     + usage.d[BCH_DATA_need_discard].buckets
		     - ca->nr_open_buckets
		     - bch2_dev_buckets_reserved(ca, watermark));
}

static inline u64 dev_buckets_available(struct bch_dev *ca,
					enum bch_watermark watermark)
{
	return __dev_buckets_available(ca, bch2_dev_usage_read(ca), watermark);
}

/* Filesystem usage: */

static inline unsigned dev_usage_u64s(void)
{
	return sizeof(struct bch_dev_usage) / sizeof(u64);
}

struct bch_fs_usage_short
bch2_fs_usage_read_short(struct bch_fs *);

int bch2_bucket_ref_update(struct btree_trans *, struct bch_dev *,
			   struct bkey_s_c, const struct bch_extent_ptr *,
			   s64, enum bch_data_type, u8, u8, u32 *);

int bch2_check_fix_ptrs(struct btree_trans *,
			enum btree_id, unsigned, struct bkey_s_c,
			enum btree_iter_update_trigger_flags);

int bch2_trigger_extent(struct btree_trans *, enum btree_id, unsigned,
			struct bkey_s_c, struct bkey_s,
			enum btree_iter_update_trigger_flags);
int bch2_trigger_reservation(struct btree_trans *, enum btree_id, unsigned,
			     struct bkey_s_c, struct bkey_s,
			     enum btree_iter_update_trigger_flags);

#define trigger_run_overwrite_then_insert(_fn, _trans, _btree_id, _level, _old, _new, _flags)\
({											\
	int ret = 0;									\
											\
	if (_old.k->type)								\
		ret = _fn(_trans, _btree_id, _level, _old, _flags & ~BTREE_TRIGGER_insert); \
	if (!ret && _new.k->type)							\
		ret = _fn(_trans, _btree_id, _level, _new.s_c, _flags & ~BTREE_TRIGGER_overwrite);\
	ret;										\
})

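/*
 * A minimal sketch (hypothetical trigger __trigger_foo): a key type that
 * handles overwrite and insert with one function can delegate both phases:
 *
 *	return trigger_run_overwrite_then_insert(__trigger_foo, trans,
 *						 btree_id, level, old, new, flags);
 */
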
void bch2_trans_account_disk_usage_change(struct btree_trans *);

int bch2_trans_mark_metadata_bucket(struct btree_trans *, struct bch_dev *, u64,
				    enum bch_data_type, unsigned,
				    enum btree_iter_update_trigger_flags);
int bch2_trans_mark_dev_sb(struct bch_fs *, struct bch_dev *,
			   enum btree_iter_update_trigger_flags);
int bch2_trans_mark_dev_sbs_flags(struct bch_fs *,
				  enum btree_iter_update_trigger_flags);
int bch2_trans_mark_dev_sbs(struct bch_fs *);

static inline bool is_superblock_bucket(struct bch_dev *ca, u64 b)
{
	struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
	u64 b_offset	= bucket_to_sector(ca, b);
	u64 b_end	= bucket_to_sector(ca, b + 1);
	unsigned i;

	if (!b)
		return true;

	for (i = 0; i < layout->nr_superblocks; i++) {
		u64 offset = le64_to_cpu(layout->sb_offset[i]);
		u64 end = offset + (1 << layout->sb_max_size_bits);

		if (!(offset >= b_end || end <= b_offset))
			return true;
	}

	return false;
}

static inline const char *bch2_data_type_str(enum bch_data_type type)
{
	return type < BCH_DATA_NR
		? __bch2_data_types[type]
		: "(invalid data type)";
}

/* disk reservations: */

static inline void bch2_disk_reservation_put(struct bch_fs *c,
					     struct disk_reservation *res)
{
	if (res->sectors) {
		this_cpu_sub(*c->online_reserved, res->sectors);
		res->sectors = 0;
	}
}

enum bch_reservation_flags {
	BCH_DISK_RESERVATION_NOFAIL	= 1 << 0,
	BCH_DISK_RESERVATION_PARTIAL	= 1 << 1,
};

int __bch2_disk_reservation_add(struct bch_fs *, struct disk_reservation *,
				u64, enum bch_reservation_flags);

static inline int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
					    u64 sectors, enum bch_reservation_flags flags)
{
#ifdef __KERNEL__
	u64 old, new;

	/* Fast path: take from this CPU's cached share of sectors_available: */
	old = this_cpu_read(c->pcpu->sectors_available);
	do {
		if (sectors > old)
			return __bch2_disk_reservation_add(c, res, sectors, flags);

		new = old - sectors;
	} while (!this_cpu_try_cmpxchg(c->pcpu->sectors_available, &old, new));

	this_cpu_add(*c->online_reserved, sectors);
	res->sectors += sectors;
	return 0;
#else
	return __bch2_disk_reservation_add(c, res, sectors, flags);
#endif
}

static inline struct disk_reservation
bch2_disk_reservation_init(struct bch_fs *c, unsigned nr_replicas)
{
	return (struct disk_reservation) {
		.sectors	= 0,
#if 0
		/* not used yet: */
		.gen		= c->capacity_gen,
#endif
		.nr_replicas	= nr_replicas,
	};
}

static inline int bch2_disk_reservation_get(struct bch_fs *c,
					    struct disk_reservation *res,
					    u64 sectors, unsigned nr_replicas,
					    int flags)
{
	*res = bch2_disk_reservation_init(c, nr_replicas);

	return bch2_disk_reservation_add(c, res, sectors * nr_replicas, flags);
}

#define RESERVE_FACTOR	6

static inline u64 avail_factor(u64 r)
{
	return div_u64(r << RESERVE_FACTOR, (1 << RESERVE_FACTOR) + 1);
}

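/*
 * Worked example: avail_factor() scales by 64/65, holding back ~1.5% -
 * e.g. avail_factor(650) == div_u64(650 << 6, 65) == 640.
 */
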
410 void bch2_buckets_nouse_free(struct bch_fs
*);
411 int bch2_buckets_nouse_alloc(struct bch_fs
*);
413 int bch2_dev_buckets_resize(struct bch_fs
*, struct bch_dev
*, u64
);
414 void bch2_dev_buckets_free(struct bch_dev
*);
415 int bch2_dev_buckets_alloc(struct bch_fs
*, struct bch_dev
*);
417 #endif /* _BUCKETS_H */