/*
 * drivers/md/persistent-data/dm-transaction-manager.c
 */
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2011 Red Hat, Inc.
5 * This file is released under the GPL.
6 */
7 #include "dm-transaction-manager.h"
8 #include "dm-space-map.h"
9 #include "dm-space-map-disk.h"
10 #include "dm-space-map-metadata.h"
11 #include "dm-persistent-data-internal.h"
13 #include <linux/export.h>
14 #include <linux/mutex.h>
15 #include <linux/hash.h>
16 #include <linux/slab.h>
17 #include <linux/device-mapper.h>
19 #define DM_MSG_PREFIX "transaction manager"
21 /*----------------------------------------------------------------*/
/* Fixed-size open hash of block numbers queued for read-ahead. */
#define PREFETCH_SIZE 128
#define PREFETCH_BITS 7
/* Marks an empty slot; -1ULL is assumed never to be a valid block number. */
#define PREFETCH_SENTINEL ((dm_block_t) -1ULL)

struct prefetch_set {
	struct mutex lock;		/* serialises all access to blocks[] */
	dm_block_t blocks[PREFETCH_SIZE];	/* slot == PREFETCH_SENTINEL when free */
};
32 static unsigned int prefetch_hash(dm_block_t b)
34 return hash_64(b, PREFETCH_BITS);
37 static void prefetch_wipe(struct prefetch_set *p)
39 unsigned int i;
41 for (i = 0; i < PREFETCH_SIZE; i++)
42 p->blocks[i] = PREFETCH_SENTINEL;
45 static void prefetch_init(struct prefetch_set *p)
47 mutex_init(&p->lock);
48 prefetch_wipe(p);
51 static void prefetch_add(struct prefetch_set *p, dm_block_t b)
53 unsigned int h = prefetch_hash(b);
55 mutex_lock(&p->lock);
56 if (p->blocks[h] == PREFETCH_SENTINEL)
57 p->blocks[h] = b;
59 mutex_unlock(&p->lock);
62 static void prefetch_issue(struct prefetch_set *p, struct dm_block_manager *bm)
64 unsigned int i;
66 mutex_lock(&p->lock);
68 for (i = 0; i < PREFETCH_SIZE; i++)
69 if (p->blocks[i] != PREFETCH_SENTINEL) {
70 dm_bm_prefetch(bm, p->blocks[i]);
71 p->blocks[i] = PREFETCH_SENTINEL;
74 mutex_unlock(&p->lock);
77 /*----------------------------------------------------------------*/
/*
 * An entry in the shadow table: records that the block at 'where' has
 * already been shadowed within the current transaction (see is_shadow()
 * and insert_shadow()).
 */
struct shadow_info {
	struct hlist_node hlist;	/* link in dm_transaction_manager buckets[] */
	dm_block_t where;		/* location of the shadowed block */
};

/*
 * It would be nice if we scaled with the size of transaction.
 */
#define DM_HASH_SIZE 256
#define DM_HASH_MASK (DM_HASH_SIZE - 1)
struct dm_transaction_manager {
	int is_clone;			/* non-zero for a non-blocking clone */
	struct dm_transaction_manager *real;	/* backing tm when is_clone is set */

	struct dm_block_manager *bm;
	struct dm_space_map *sm;

	spinlock_t lock;		/* protects buckets[] */
	struct hlist_head buckets[DM_HASH_SIZE];	/* shadow table */

	struct prefetch_set prefetches;	/* read-ahead queued by clones */
};
103 /*----------------------------------------------------------------*/
105 static int is_shadow(struct dm_transaction_manager *tm, dm_block_t b)
107 int r = 0;
108 unsigned int bucket = dm_hash_block(b, DM_HASH_MASK);
109 struct shadow_info *si;
111 spin_lock(&tm->lock);
112 hlist_for_each_entry(si, tm->buckets + bucket, hlist)
113 if (si->where == b) {
114 r = 1;
115 break;
117 spin_unlock(&tm->lock);
119 return r;
123 * This can silently fail if there's no memory. We're ok with this since
124 * creating redundant shadows causes no harm.
126 static void insert_shadow(struct dm_transaction_manager *tm, dm_block_t b)
128 unsigned int bucket;
129 struct shadow_info *si;
131 si = kmalloc(sizeof(*si), GFP_NOIO);
132 if (si) {
133 si->where = b;
134 bucket = dm_hash_block(b, DM_HASH_MASK);
135 spin_lock(&tm->lock);
136 hlist_add_head(&si->hlist, tm->buckets + bucket);
137 spin_unlock(&tm->lock);
141 static void wipe_shadow_table(struct dm_transaction_manager *tm)
143 struct shadow_info *si;
144 struct hlist_node *tmp;
145 struct hlist_head *bucket;
146 int i;
148 spin_lock(&tm->lock);
149 for (i = 0; i < DM_HASH_SIZE; i++) {
150 bucket = tm->buckets + i;
151 hlist_for_each_entry_safe(si, tmp, bucket, hlist)
152 kfree(si);
154 INIT_HLIST_HEAD(bucket);
157 spin_unlock(&tm->lock);
160 /*----------------------------------------------------------------*/
162 static struct dm_transaction_manager *dm_tm_create(struct dm_block_manager *bm,
163 struct dm_space_map *sm)
165 int i;
166 struct dm_transaction_manager *tm;
168 tm = kmalloc(sizeof(*tm), GFP_KERNEL);
169 if (!tm)
170 return ERR_PTR(-ENOMEM);
172 tm->is_clone = 0;
173 tm->real = NULL;
174 tm->bm = bm;
175 tm->sm = sm;
177 spin_lock_init(&tm->lock);
178 for (i = 0; i < DM_HASH_SIZE; i++)
179 INIT_HLIST_HEAD(tm->buckets + i);
181 prefetch_init(&tm->prefetches);
183 return tm;
186 struct dm_transaction_manager *dm_tm_create_non_blocking_clone(struct dm_transaction_manager *real)
188 struct dm_transaction_manager *tm;
190 tm = kmalloc(sizeof(*tm), GFP_KERNEL);
191 if (tm) {
192 tm->is_clone = 1;
193 tm->real = real;
196 return tm;
198 EXPORT_SYMBOL_GPL(dm_tm_create_non_blocking_clone);
200 void dm_tm_destroy(struct dm_transaction_manager *tm)
202 if (!tm)
203 return;
205 if (!tm->is_clone)
206 wipe_shadow_table(tm);
208 kfree(tm);
210 EXPORT_SYMBOL_GPL(dm_tm_destroy);
212 int dm_tm_pre_commit(struct dm_transaction_manager *tm)
214 int r;
216 if (tm->is_clone)
217 return -EWOULDBLOCK;
219 r = dm_sm_commit(tm->sm);
220 if (r < 0)
221 return r;
223 return dm_bm_flush(tm->bm);
225 EXPORT_SYMBOL_GPL(dm_tm_pre_commit);
227 int dm_tm_commit(struct dm_transaction_manager *tm, struct dm_block *root)
229 if (tm->is_clone)
230 return -EWOULDBLOCK;
232 wipe_shadow_table(tm);
233 dm_bm_unlock(root);
235 return dm_bm_flush(tm->bm);
237 EXPORT_SYMBOL_GPL(dm_tm_commit);
239 int dm_tm_new_block(struct dm_transaction_manager *tm,
240 const struct dm_block_validator *v,
241 struct dm_block **result)
243 int r;
244 dm_block_t new_block;
246 if (tm->is_clone)
247 return -EWOULDBLOCK;
249 r = dm_sm_new_block(tm->sm, &new_block);
250 if (r < 0)
251 return r;
253 r = dm_bm_write_lock_zero(tm->bm, new_block, v, result);
254 if (r < 0) {
255 dm_sm_dec_block(tm->sm, new_block);
256 return r;
260 * New blocks count as shadows in that they don't need to be
261 * shadowed again.
263 insert_shadow(tm, new_block);
265 return 0;
268 static int __shadow_block(struct dm_transaction_manager *tm, dm_block_t orig,
269 const struct dm_block_validator *v,
270 struct dm_block **result)
272 int r;
273 dm_block_t new;
274 struct dm_block *orig_block;
276 r = dm_sm_new_block(tm->sm, &new);
277 if (r < 0)
278 return r;
280 r = dm_sm_dec_block(tm->sm, orig);
281 if (r < 0)
282 return r;
284 r = dm_bm_read_lock(tm->bm, orig, v, &orig_block);
285 if (r < 0)
286 return r;
289 * It would be tempting to use dm_bm_unlock_move here, but some
290 * code, such as the space maps, keeps using the old data structures
291 * secure in the knowledge they won't be changed until the next
292 * transaction. Using unlock_move would force a synchronous read
293 * since the old block would no longer be in the cache.
295 r = dm_bm_write_lock_zero(tm->bm, new, v, result);
296 if (r) {
297 dm_bm_unlock(orig_block);
298 return r;
301 memcpy(dm_block_data(*result), dm_block_data(orig_block),
302 dm_bm_block_size(tm->bm));
304 dm_bm_unlock(orig_block);
305 return r;
308 int dm_tm_shadow_block(struct dm_transaction_manager *tm, dm_block_t orig,
309 const struct dm_block_validator *v, struct dm_block **result,
310 int *inc_children)
312 int r;
314 if (tm->is_clone)
315 return -EWOULDBLOCK;
317 r = dm_sm_count_is_more_than_one(tm->sm, orig, inc_children);
318 if (r < 0)
319 return r;
321 if (is_shadow(tm, orig) && !*inc_children)
322 return dm_bm_write_lock(tm->bm, orig, v, result);
324 r = __shadow_block(tm, orig, v, result);
325 if (r < 0)
326 return r;
327 insert_shadow(tm, dm_block_location(*result));
329 return r;
331 EXPORT_SYMBOL_GPL(dm_tm_shadow_block);
333 int dm_tm_read_lock(struct dm_transaction_manager *tm, dm_block_t b,
334 const struct dm_block_validator *v,
335 struct dm_block **blk)
337 if (tm->is_clone) {
338 int r = dm_bm_read_try_lock(tm->real->bm, b, v, blk);
340 if (r == -EWOULDBLOCK)
341 prefetch_add(&tm->real->prefetches, b);
343 return r;
346 return dm_bm_read_lock(tm->bm, b, v, blk);
348 EXPORT_SYMBOL_GPL(dm_tm_read_lock);
/* Release a block lock taken through this tm (delegates to the bm). */
void dm_tm_unlock(struct dm_transaction_manager *tm, struct dm_block *b)
{
	dm_bm_unlock(b);
}
EXPORT_SYMBOL_GPL(dm_tm_unlock);
/* Increment the reference count of block @b in the space map. */
void dm_tm_inc(struct dm_transaction_manager *tm, dm_block_t b)
{
	/*
	 * The non-blocking clone doesn't support this.
	 */
	BUG_ON(tm->is_clone);

	dm_sm_inc_block(tm->sm, b);
}
EXPORT_SYMBOL_GPL(dm_tm_inc);
/* Increment the reference counts of blocks [b, e) in the space map. */
void dm_tm_inc_range(struct dm_transaction_manager *tm, dm_block_t b, dm_block_t e)
{
	/*
	 * The non-blocking clone doesn't support this.
	 */
	BUG_ON(tm->is_clone);

	dm_sm_inc_blocks(tm->sm, b, e);
}
EXPORT_SYMBOL_GPL(dm_tm_inc_range);
/* Decrement the reference count of block @b in the space map. */
void dm_tm_dec(struct dm_transaction_manager *tm, dm_block_t b)
{
	/*
	 * The non-blocking clone doesn't support this.
	 */
	BUG_ON(tm->is_clone);

	dm_sm_dec_block(tm->sm, b);
}
EXPORT_SYMBOL_GPL(dm_tm_dec);
/* Decrement the reference counts of blocks [b, e) in the space map. */
void dm_tm_dec_range(struct dm_transaction_manager *tm, dm_block_t b, dm_block_t e)
{
	/*
	 * The non-blocking clone doesn't support this.
	 */
	BUG_ON(tm->is_clone);

	dm_sm_dec_blocks(tm->sm, b, e);
}
EXPORT_SYMBOL_GPL(dm_tm_dec_range);
400 void dm_tm_with_runs(struct dm_transaction_manager *tm,
401 const __le64 *value_le, unsigned int count, dm_tm_run_fn fn)
403 uint64_t b, begin, end;
404 bool in_run = false;
405 unsigned int i;
407 for (i = 0; i < count; i++, value_le++) {
408 b = le64_to_cpu(*value_le);
410 if (in_run) {
411 if (b == end)
412 end++;
413 else {
414 fn(tm, begin, end);
415 begin = b;
416 end = b + 1;
418 } else {
419 in_run = true;
420 begin = b;
421 end = b + 1;
425 if (in_run)
426 fn(tm, begin, end);
428 EXPORT_SYMBOL_GPL(dm_tm_with_runs);
430 int dm_tm_ref(struct dm_transaction_manager *tm, dm_block_t b,
431 uint32_t *result)
433 if (tm->is_clone)
434 return -EWOULDBLOCK;
436 return dm_sm_get_count(tm->sm, b, result);
439 int dm_tm_block_is_shared(struct dm_transaction_manager *tm, dm_block_t b,
440 int *result)
442 if (tm->is_clone)
443 return -EWOULDBLOCK;
445 return dm_sm_count_is_more_than_one(tm->sm, b, result);
/* Return the block manager this tm was created with. */
struct dm_block_manager *dm_tm_get_bm(struct dm_transaction_manager *tm)
{
	return tm->bm;
}
453 void dm_tm_issue_prefetches(struct dm_transaction_manager *tm)
455 prefetch_issue(&tm->prefetches, tm->bm);
457 EXPORT_SYMBOL_GPL(dm_tm_issue_prefetches);
459 /*----------------------------------------------------------------*/
461 static int dm_tm_create_internal(struct dm_block_manager *bm,
462 dm_block_t sb_location,
463 struct dm_transaction_manager **tm,
464 struct dm_space_map **sm,
465 int create,
466 void *sm_root, size_t sm_len)
468 int r;
470 *sm = dm_sm_metadata_init();
471 if (IS_ERR(*sm))
472 return PTR_ERR(*sm);
474 *tm = dm_tm_create(bm, *sm);
475 if (IS_ERR(*tm)) {
476 dm_sm_destroy(*sm);
477 return PTR_ERR(*tm);
480 if (create) {
481 r = dm_sm_metadata_create(*sm, *tm, dm_bm_nr_blocks(bm),
482 sb_location);
483 if (r) {
484 DMERR("couldn't create metadata space map");
485 goto bad;
488 } else {
489 r = dm_sm_metadata_open(*sm, *tm, sm_root, sm_len);
490 if (r) {
491 DMERR("couldn't open metadata space map");
492 goto bad;
496 return 0;
498 bad:
499 dm_tm_destroy(*tm);
500 dm_sm_destroy(*sm);
501 return r;
504 int dm_tm_create_with_sm(struct dm_block_manager *bm, dm_block_t sb_location,
505 struct dm_transaction_manager **tm,
506 struct dm_space_map **sm)
508 return dm_tm_create_internal(bm, sb_location, tm, sm, 1, NULL, 0);
510 EXPORT_SYMBOL_GPL(dm_tm_create_with_sm);
512 int dm_tm_open_with_sm(struct dm_block_manager *bm, dm_block_t sb_location,
513 void *sm_root, size_t root_len,
514 struct dm_transaction_manager **tm,
515 struct dm_space_map **sm)
517 return dm_tm_create_internal(bm, sb_location, tm, sm, 0, sm_root, root_len);
519 EXPORT_SYMBOL_GPL(dm_tm_open_with_sm);
521 /*----------------------------------------------------------------*/