/* drivers/md/persistent-data/dm-transaction-manager.c */
/*
 * Copyright (C) 2011 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */
#include "dm-transaction-manager.h"
#include "dm-space-map.h"
#include "dm-space-map-disk.h"
#include "dm-space-map-metadata.h"
#include "dm-persistent-data-internal.h"

#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/device-mapper.h>
18 #define DM_MSG_PREFIX "transaction manager"
20 /*----------------------------------------------------------------*/
22 #define PREFETCH_SIZE 128
23 #define PREFETCH_BITS 7
24 #define PREFETCH_SENTINEL ((dm_block_t) -1ULL)
26 struct prefetch_set {
27 struct mutex lock;
28 dm_block_t blocks[PREFETCH_SIZE];
31 static unsigned prefetch_hash(dm_block_t b)
33 return hash_64(b, PREFETCH_BITS);
36 static void prefetch_wipe(struct prefetch_set *p)
38 unsigned i;
39 for (i = 0; i < PREFETCH_SIZE; i++)
40 p->blocks[i] = PREFETCH_SENTINEL;
43 static void prefetch_init(struct prefetch_set *p)
45 mutex_init(&p->lock);
46 prefetch_wipe(p);
49 static void prefetch_add(struct prefetch_set *p, dm_block_t b)
51 unsigned h = prefetch_hash(b);
53 mutex_lock(&p->lock);
54 if (p->blocks[h] == PREFETCH_SENTINEL)
55 p->blocks[h] = b;
57 mutex_unlock(&p->lock);
60 static void prefetch_issue(struct prefetch_set *p, struct dm_block_manager *bm)
62 unsigned i;
64 mutex_lock(&p->lock);
66 for (i = 0; i < PREFETCH_SIZE; i++)
67 if (p->blocks[i] != PREFETCH_SENTINEL) {
68 dm_bm_prefetch(bm, p->blocks[i]);
69 p->blocks[i] = PREFETCH_SENTINEL;
72 mutex_unlock(&p->lock);
75 /*----------------------------------------------------------------*/
77 struct shadow_info {
78 struct hlist_node hlist;
79 dm_block_t where;
83 * It would be nice if we scaled with the size of transaction.
85 #define DM_HASH_SIZE 256
86 #define DM_HASH_MASK (DM_HASH_SIZE - 1)
88 struct dm_transaction_manager {
89 int is_clone;
90 struct dm_transaction_manager *real;
92 struct dm_block_manager *bm;
93 struct dm_space_map *sm;
95 spinlock_t lock;
96 struct hlist_head buckets[DM_HASH_SIZE];
98 struct prefetch_set prefetches;
101 /*----------------------------------------------------------------*/
103 static int is_shadow(struct dm_transaction_manager *tm, dm_block_t b)
105 int r = 0;
106 unsigned bucket = dm_hash_block(b, DM_HASH_MASK);
107 struct shadow_info *si;
109 spin_lock(&tm->lock);
110 hlist_for_each_entry(si, tm->buckets + bucket, hlist)
111 if (si->where == b) {
112 r = 1;
113 break;
115 spin_unlock(&tm->lock);
117 return r;
121 * This can silently fail if there's no memory. We're ok with this since
122 * creating redundant shadows causes no harm.
124 static void insert_shadow(struct dm_transaction_manager *tm, dm_block_t b)
126 unsigned bucket;
127 struct shadow_info *si;
129 si = kmalloc(sizeof(*si), GFP_NOIO);
130 if (si) {
131 si->where = b;
132 bucket = dm_hash_block(b, DM_HASH_MASK);
133 spin_lock(&tm->lock);
134 hlist_add_head(&si->hlist, tm->buckets + bucket);
135 spin_unlock(&tm->lock);
139 static void wipe_shadow_table(struct dm_transaction_manager *tm)
141 struct shadow_info *si;
142 struct hlist_node *tmp;
143 struct hlist_head *bucket;
144 int i;
146 spin_lock(&tm->lock);
147 for (i = 0; i < DM_HASH_SIZE; i++) {
148 bucket = tm->buckets + i;
149 hlist_for_each_entry_safe(si, tmp, bucket, hlist)
150 kfree(si);
152 INIT_HLIST_HEAD(bucket);
155 spin_unlock(&tm->lock);
158 /*----------------------------------------------------------------*/
160 static struct dm_transaction_manager *dm_tm_create(struct dm_block_manager *bm,
161 struct dm_space_map *sm)
163 int i;
164 struct dm_transaction_manager *tm;
166 tm = kmalloc(sizeof(*tm), GFP_KERNEL);
167 if (!tm)
168 return ERR_PTR(-ENOMEM);
170 tm->is_clone = 0;
171 tm->real = NULL;
172 tm->bm = bm;
173 tm->sm = sm;
175 spin_lock_init(&tm->lock);
176 for (i = 0; i < DM_HASH_SIZE; i++)
177 INIT_HLIST_HEAD(tm->buckets + i);
179 prefetch_init(&tm->prefetches);
181 return tm;
184 struct dm_transaction_manager *dm_tm_create_non_blocking_clone(struct dm_transaction_manager *real)
186 struct dm_transaction_manager *tm;
188 tm = kmalloc(sizeof(*tm), GFP_KERNEL);
189 if (tm) {
190 tm->is_clone = 1;
191 tm->real = real;
194 return tm;
196 EXPORT_SYMBOL_GPL(dm_tm_create_non_blocking_clone);
198 void dm_tm_destroy(struct dm_transaction_manager *tm)
200 if (!tm->is_clone)
201 wipe_shadow_table(tm);
203 kfree(tm);
205 EXPORT_SYMBOL_GPL(dm_tm_destroy);
207 int dm_tm_pre_commit(struct dm_transaction_manager *tm)
209 int r;
211 if (tm->is_clone)
212 return -EWOULDBLOCK;
214 r = dm_sm_commit(tm->sm);
215 if (r < 0)
216 return r;
218 return dm_bm_flush(tm->bm);
220 EXPORT_SYMBOL_GPL(dm_tm_pre_commit);
222 int dm_tm_commit(struct dm_transaction_manager *tm, struct dm_block *root)
224 if (tm->is_clone)
225 return -EWOULDBLOCK;
227 wipe_shadow_table(tm);
228 dm_bm_unlock(root);
230 return dm_bm_flush(tm->bm);
232 EXPORT_SYMBOL_GPL(dm_tm_commit);
234 int dm_tm_new_block(struct dm_transaction_manager *tm,
235 struct dm_block_validator *v,
236 struct dm_block **result)
238 int r;
239 dm_block_t new_block;
241 if (tm->is_clone)
242 return -EWOULDBLOCK;
244 r = dm_sm_new_block(tm->sm, &new_block);
245 if (r < 0)
246 return r;
248 r = dm_bm_write_lock_zero(tm->bm, new_block, v, result);
249 if (r < 0) {
250 dm_sm_dec_block(tm->sm, new_block);
251 return r;
255 * New blocks count as shadows in that they don't need to be
256 * shadowed again.
258 insert_shadow(tm, new_block);
260 return 0;
263 static int __shadow_block(struct dm_transaction_manager *tm, dm_block_t orig,
264 struct dm_block_validator *v,
265 struct dm_block **result)
267 int r;
268 dm_block_t new;
269 struct dm_block *orig_block;
271 r = dm_sm_new_block(tm->sm, &new);
272 if (r < 0)
273 return r;
275 r = dm_sm_dec_block(tm->sm, orig);
276 if (r < 0)
277 return r;
279 r = dm_bm_read_lock(tm->bm, orig, v, &orig_block);
280 if (r < 0)
281 return r;
284 * It would be tempting to use dm_bm_unlock_move here, but some
285 * code, such as the space maps, keeps using the old data structures
286 * secure in the knowledge they won't be changed until the next
287 * transaction. Using unlock_move would force a synchronous read
288 * since the old block would no longer be in the cache.
290 r = dm_bm_write_lock_zero(tm->bm, new, v, result);
291 if (r) {
292 dm_bm_unlock(orig_block);
293 return r;
296 memcpy(dm_block_data(*result), dm_block_data(orig_block),
297 dm_bm_block_size(tm->bm));
299 dm_bm_unlock(orig_block);
300 return r;
303 int dm_tm_shadow_block(struct dm_transaction_manager *tm, dm_block_t orig,
304 struct dm_block_validator *v, struct dm_block **result,
305 int *inc_children)
307 int r;
309 if (tm->is_clone)
310 return -EWOULDBLOCK;
312 r = dm_sm_count_is_more_than_one(tm->sm, orig, inc_children);
313 if (r < 0)
314 return r;
316 if (is_shadow(tm, orig) && !*inc_children)
317 return dm_bm_write_lock(tm->bm, orig, v, result);
319 r = __shadow_block(tm, orig, v, result);
320 if (r < 0)
321 return r;
322 insert_shadow(tm, dm_block_location(*result));
324 return r;
326 EXPORT_SYMBOL_GPL(dm_tm_shadow_block);
328 int dm_tm_read_lock(struct dm_transaction_manager *tm, dm_block_t b,
329 struct dm_block_validator *v,
330 struct dm_block **blk)
332 if (tm->is_clone) {
333 int r = dm_bm_read_try_lock(tm->real->bm, b, v, blk);
335 if (r == -EWOULDBLOCK)
336 prefetch_add(&tm->real->prefetches, b);
338 return r;
341 return dm_bm_read_lock(tm->bm, b, v, blk);
343 EXPORT_SYMBOL_GPL(dm_tm_read_lock);
/* Release a block lock taken through this transaction manager. */
void dm_tm_unlock(struct dm_transaction_manager *tm, struct dm_block *b)
{
	dm_bm_unlock(b);
}
EXPORT_SYMBOL_GPL(dm_tm_unlock);
351 void dm_tm_inc(struct dm_transaction_manager *tm, dm_block_t b)
354 * The non-blocking clone doesn't support this.
356 BUG_ON(tm->is_clone);
358 dm_sm_inc_block(tm->sm, b);
360 EXPORT_SYMBOL_GPL(dm_tm_inc);
362 void dm_tm_dec(struct dm_transaction_manager *tm, dm_block_t b)
365 * The non-blocking clone doesn't support this.
367 BUG_ON(tm->is_clone);
369 dm_sm_dec_block(tm->sm, b);
371 EXPORT_SYMBOL_GPL(dm_tm_dec);
373 int dm_tm_ref(struct dm_transaction_manager *tm, dm_block_t b,
374 uint32_t *result)
376 if (tm->is_clone)
377 return -EWOULDBLOCK;
379 return dm_sm_get_count(tm->sm, b, result);
382 struct dm_block_manager *dm_tm_get_bm(struct dm_transaction_manager *tm)
384 return tm->bm;
387 void dm_tm_issue_prefetches(struct dm_transaction_manager *tm)
389 prefetch_issue(&tm->prefetches, tm->bm);
391 EXPORT_SYMBOL_GPL(dm_tm_issue_prefetches);
393 /*----------------------------------------------------------------*/
395 static int dm_tm_create_internal(struct dm_block_manager *bm,
396 dm_block_t sb_location,
397 struct dm_transaction_manager **tm,
398 struct dm_space_map **sm,
399 int create,
400 void *sm_root, size_t sm_len)
402 int r;
404 *sm = dm_sm_metadata_init();
405 if (IS_ERR(*sm))
406 return PTR_ERR(*sm);
408 *tm = dm_tm_create(bm, *sm);
409 if (IS_ERR(*tm)) {
410 dm_sm_destroy(*sm);
411 return PTR_ERR(*tm);
414 if (create) {
415 r = dm_sm_metadata_create(*sm, *tm, dm_bm_nr_blocks(bm),
416 sb_location);
417 if (r) {
418 DMERR("couldn't create metadata space map");
419 goto bad;
422 } else {
423 r = dm_sm_metadata_open(*sm, *tm, sm_root, sm_len);
424 if (r) {
425 DMERR("couldn't open metadata space map");
426 goto bad;
430 return 0;
432 bad:
433 dm_tm_destroy(*tm);
434 dm_sm_destroy(*sm);
435 return r;
438 int dm_tm_create_with_sm(struct dm_block_manager *bm, dm_block_t sb_location,
439 struct dm_transaction_manager **tm,
440 struct dm_space_map **sm)
442 return dm_tm_create_internal(bm, sb_location, tm, sm, 1, NULL, 0);
444 EXPORT_SYMBOL_GPL(dm_tm_create_with_sm);
446 int dm_tm_open_with_sm(struct dm_block_manager *bm, dm_block_t sb_location,
447 void *sm_root, size_t root_len,
448 struct dm_transaction_manager **tm,
449 struct dm_space_map **sm)
451 return dm_tm_create_internal(bm, sb_location, tm, sm, 0, sm_root, root_len);
453 EXPORT_SYMBOL_GPL(dm_tm_open_with_sm);
455 /*----------------------------------------------------------------*/