/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_ALLOC_FOREGROUND_H
#define _BCACHEFS_ALLOC_FOREGROUND_H

#include "bcachefs.h"
#include "alloc_types.h"
#include "extents.h"
#include "sb-members.h"

#include <linux/hash.h>

extern const char * const bch2_watermarks[];

void bch2_reset_alloc_cursors(struct bch_fs *);

struct dev_alloc_list {
	unsigned	nr;
	u8		devs[BCH_SB_MEMBERS_MAX];
};

struct dev_alloc_list bch2_dev_alloc_list(struct bch_fs *,
					  struct dev_stripe_state *,
					  struct bch_devs_mask *);
void bch2_dev_stripe_increment(struct bch_dev *, struct dev_stripe_state *);

long bch2_bucket_alloc_new_fs(struct bch_dev *);

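/*
 * Look up the device an open_bucket was allocated from; bch2_dev_have_ref()
 * implies the caller already holds a reference to that device.
 */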
static inline struct bch_dev *ob_dev(struct bch_fs *c, struct open_bucket *ob)
{
	return bch2_dev_have_ref(c, ob->dev);
}

struct open_bucket *bch2_bucket_alloc(struct bch_fs *, struct bch_dev *,
				      enum bch_watermark, enum bch_data_type,
				      struct closure *);

static inline void ob_push(struct bch_fs *c, struct open_buckets *obs,
			   struct open_bucket *ob)
{
	BUG_ON(obs->nr >= ARRAY_SIZE(obs->v));

	obs->v[obs->nr++] = ob - c->open_buckets;
}

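/*
 * Iterate over the open_buckets in @_obs: the list stores indices into
 * c->open_buckets, which this resolves to struct open_bucket pointers.
 */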
#define open_bucket_for_each(_c, _obs, _ob, _i)				\
	for ((_i) = 0;							\
	     (_i) < (_obs)->nr &&					\
	     ((_ob) = (_c)->open_buckets + (_obs)->v[_i], true);	\
	     (_i)++)

static inline struct open_bucket *ec_open_bucket(struct bch_fs *c,
						 struct open_buckets *obs)
{
	struct open_bucket *ob;
	unsigned i;

	open_bucket_for_each(c, obs, ob, i)
		if (ob->ec)
			return ob;

	return NULL;
}

void bch2_open_bucket_write_error(struct bch_fs *,
				  struct open_buckets *, unsigned);

void __bch2_open_bucket_put(struct bch_fs *, struct open_bucket *);

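/* Drop a pin on @ob; the final put releases it via __bch2_open_bucket_put(). */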
static inline void bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
{
	if (atomic_dec_and_test(&ob->pin))
		__bch2_open_bucket_put(c, ob);
}

static inline void bch2_open_buckets_put(struct bch_fs *c,
					 struct open_buckets *ptrs)
{
	struct open_bucket *ob;
	unsigned i;

	open_bucket_for_each(c, ptrs, ob, i)
		bch2_open_bucket_put(c, ob);
	ptrs->nr = 0;
}

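/*
 * Finish an allocation: put the buckets we used up, keep the ones that still
 * have space for future writes, and drop the write point lock.
 */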
static inline void bch2_alloc_sectors_done_inlined(struct bch_fs *c, struct write_point *wp)
{
	struct open_buckets ptrs = { .nr = 0 }, keep = { .nr = 0 };
	struct open_bucket *ob;
	unsigned i;

	open_bucket_for_each(c, &wp->ptrs, ob, i)
		ob_push(c, !ob->sectors_free ? &ptrs : &keep, ob);
	wp->ptrs = keep;

	mutex_unlock(&wp->lock);

	bch2_open_buckets_put(c, &ptrs);
}

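/* Take a pin on each of @wp's buckets and collect them in @ptrs. */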
static inline void bch2_open_bucket_get(struct bch_fs *c,
					struct write_point *wp,
					struct open_buckets *ptrs)
{
	struct open_bucket *ob;
	unsigned i;

	open_bucket_for_each(c, &wp->ptrs, ob, i) {
		ob->data_type = wp->data_type;
		atomic_inc(&ob->pin);
		ob_push(c, ptrs, ob);
	}
}

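/* Hash (dev, bucket) to its slot in the open bucket hash table. */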
static inline open_bucket_idx_t *open_bucket_hashslot(struct bch_fs *c,
						      unsigned dev, u64 bucket)
{
	return c->open_buckets_hash +
		(jhash_3words(dev, bucket, bucket >> 32, 0) &
		 (OPEN_BUCKETS_COUNT - 1));
}

static inline bool bch2_bucket_is_open(struct bch_fs *c, unsigned dev, u64 bucket)
{
	open_bucket_idx_t slot = *open_bucket_hashslot(c, dev, bucket);

	while (slot) {
		struct open_bucket *ob = &c->open_buckets[slot];

		if (ob->dev == dev && ob->bucket == bucket)
			return true;

		slot = ob->hash;
	}

	return false;
}

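/*
 * As bch2_bucket_is_open(), but on a miss rechecks under freelist_lock so the
 * unlocked lookup can't miss a bucket that is concurrently being added.
 */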
static inline bool bch2_bucket_is_open_safe(struct bch_fs *c, unsigned dev, u64 bucket)
{
	bool ret;

	if (bch2_bucket_is_open(c, dev, bucket))
		return true;

	spin_lock(&c->freelist_lock);
	ret = bch2_bucket_is_open(c, dev, bucket);
	spin_unlock(&c->freelist_lock);

	return ret;
}

enum bch_write_flags;
int bch2_bucket_alloc_set_trans(struct btree_trans *, struct open_buckets *,
			struct dev_stripe_state *, struct bch_devs_mask *,
			unsigned, unsigned *, bool *, enum bch_write_flags,
			enum bch_data_type, enum bch_watermark,
			struct closure *);

int bch2_alloc_sectors_start_trans(struct btree_trans *,
				   unsigned, unsigned,
				   struct write_point_specifier,
				   struct bch_devs_list *,
				   unsigned, unsigned,
				   enum bch_watermark,
				   enum bch_write_flags,
				   struct closure *,
				   struct write_point **);

struct bch_extent_ptr bch2_ob_ptr(struct bch_fs *, struct open_bucket *);

/*
 * Append pointers to the space we just allocated to @k, and mark @sectors space
 * as allocated out of @ob
 */
static inline void
bch2_alloc_sectors_append_ptrs_inlined(struct bch_fs *c, struct write_point *wp,
				       struct bkey_i *k, unsigned sectors,
				       bool cached)
{
	struct open_bucket *ob;
	unsigned i;

	BUG_ON(sectors > wp->sectors_free);
	wp->sectors_free	-= sectors;
	wp->sectors_allocated	+= sectors;

	open_bucket_for_each(c, &wp->ptrs, ob, i) {
		struct bch_dev *ca = ob_dev(c, ob);
		struct bch_extent_ptr ptr = bch2_ob_ptr(c, ob);

		ptr.cached = cached ||
			(!ca->mi.durability &&
			 wp->data_type == BCH_DATA_user);

		bch2_bkey_append_ptr(k, ptr);

		BUG_ON(sectors > ob->sectors_free);
		ob->sectors_free -= sectors;
	}
}

void bch2_alloc_sectors_append_ptrs(struct bch_fs *, struct write_point *,
				    struct bkey_i *, unsigned, bool);
void bch2_alloc_sectors_done(struct bch_fs *, struct write_point *);

void bch2_open_buckets_stop(struct bch_fs *c, struct bch_dev *, bool);

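/*
 * A write_point_specifier is either a hashed value (low bit set, see
 * writepoint_hashed()) or a plain write_point pointer, which is aligned and so
 * has the low bit clear.
 */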
static inline struct write_point_specifier writepoint_hashed(unsigned long v)
{
	return (struct write_point_specifier) { .v = v | 1 };
}

static inline struct write_point_specifier writepoint_ptr(struct write_point *wp)
{
	return (struct write_point_specifier) { .v = (unsigned long) wp };
}

void bch2_fs_allocator_foreground_init(struct bch_fs *);

void bch2_open_bucket_to_text(struct printbuf *, struct bch_fs *, struct open_bucket *);
void bch2_open_buckets_to_text(struct printbuf *, struct bch_fs *, struct bch_dev *);
void bch2_open_buckets_partial_to_text(struct printbuf *, struct bch_fs *);

void bch2_write_points_to_text(struct printbuf *, struct bch_fs *);

void bch2_fs_alloc_debug_to_text(struct printbuf *, struct bch_fs *);
void bch2_dev_alloc_debug_to_text(struct printbuf *, struct bch_dev *);

void __bch2_wait_on_allocator(struct bch_fs *, struct closure *);
static inline void bch2_wait_on_allocator(struct bch_fs *c, struct closure *cl)
{
	if (cl->closure_get_happened)
		__bch2_wait_on_allocator(c, cl);
}

#endif /* _BCACHEFS_ALLOC_FOREGROUND_H */