Merge branch 'akpm'
[linux-2.6/next.git] / drivers / md / persistent-data / dm-transaction-manager.c
blobe58e89ecfd6760426485f008f5d6e56f2ab1f38b
1 /*
2 * Copyright (C) 2011 Red Hat, Inc. All rights reserved.
4 * This file is released under the GPL.
5 */
6 #include "dm-transaction-manager.h"
7 #include "dm-space-map.h"
8 #include "dm-space-map-disk.h"
9 #include "dm-space-map-metadata.h"
10 #include "dm-persistent-data-internal.h"
12 #include <linux/export.h>
13 #include <linux/slab.h>
14 #include <linux/device-mapper.h>
16 #define DM_MSG_PREFIX "transaction manager"
18 /*----------------------------------------------------------------*/
/*
 * A record that block 'where' has already been shadowed (copied) during
 * the current transaction.  Kept in the transaction manager's hash table
 * so the same block isn't shadowed twice in one transaction.
 */
struct shadow_info {
	struct hlist_node hlist;	/* chained off tm->buckets[] */
	dm_block_t where;		/* location of the shadowed block */
};

/*
 * It would be nice if we scaled with the size of transaction.
 */
#define HASH_SIZE 256
#define HASH_MASK (HASH_SIZE - 1)
struct dm_transaction_manager {
	int is_clone;			/* non-zero for a non-blocking clone */
	struct dm_transaction_manager *real;	/* the tm a clone delegates to */

	struct dm_block_manager *bm;
	struct dm_space_map *sm;

	spinlock_t lock;		/* protects buckets[] */
	struct hlist_head buckets[HASH_SIZE];	/* shadow_info hash table */
};
42 /*----------------------------------------------------------------*/
44 static int is_shadow(struct dm_transaction_manager *tm, dm_block_t b)
46 int r = 0;
47 unsigned bucket = dm_hash_block(b, HASH_MASK);
48 struct shadow_info *si;
49 struct hlist_node *n;
51 spin_lock(&tm->lock);
53 hlist_for_each_entry(si, n, tm->buckets + bucket, hlist)
54 if (si->where == b) {
55 r = 1;
56 break;
59 spin_unlock(&tm->lock);
61 return r;
65 * This can silently fail if there's no memory. We're ok with this since
66 * creating redundant shadows causes no harm.
68 static void insert_shadow(struct dm_transaction_manager *tm, dm_block_t b)
70 unsigned bucket;
71 struct shadow_info *si;
73 si = kmalloc(sizeof(*si), GFP_NOIO);
74 if (si) {
75 si->where = b;
76 bucket = dm_hash_block(b, HASH_MASK);
78 spin_lock(&tm->lock);
79 hlist_add_head(&si->hlist, tm->buckets + bucket);
80 spin_unlock(&tm->lock);
84 static void wipe_shadow_table(struct dm_transaction_manager *tm)
86 struct shadow_info *si;
87 struct hlist_node *n, *tmp;
88 struct hlist_head *bucket;
89 int i;
91 spin_lock(&tm->lock);
92 for (i = 0; i < HASH_SIZE; i++) {
93 bucket = tm->buckets + i;
94 hlist_for_each_entry_safe(si, n, tmp, bucket, hlist)
95 kfree(si);
97 INIT_HLIST_HEAD(bucket);
99 spin_unlock(&tm->lock);
102 /*----------------------------------------------------------------*/
104 static struct dm_transaction_manager *dm_tm_create(struct dm_block_manager *bm,
105 struct dm_space_map *sm)
107 int i;
108 struct dm_transaction_manager *tm;
110 tm = kmalloc(sizeof(*tm), GFP_KERNEL);
111 if (!tm)
112 return ERR_PTR(-ENOMEM);
114 tm->is_clone = 0;
115 tm->real = NULL;
116 tm->bm = bm;
117 tm->sm = sm;
119 spin_lock_init(&tm->lock);
120 for (i = 0; i < HASH_SIZE; i++)
121 INIT_HLIST_HEAD(tm->buckets + i);
123 return tm;
126 struct dm_transaction_manager *dm_tm_create_non_blocking_clone(struct dm_transaction_manager *real)
128 struct dm_transaction_manager *tm;
130 tm = kmalloc(sizeof(*tm), GFP_KERNEL);
131 if (tm) {
132 tm->is_clone = 1;
133 tm->real = real;
136 return tm;
138 EXPORT_SYMBOL_GPL(dm_tm_create_non_blocking_clone);
140 void dm_tm_destroy(struct dm_transaction_manager *tm)
142 kfree(tm);
144 EXPORT_SYMBOL_GPL(dm_tm_destroy);
146 int dm_tm_pre_commit(struct dm_transaction_manager *tm)
148 int r;
150 if (tm->is_clone)
151 return -EWOULDBLOCK;
153 r = dm_sm_commit(tm->sm);
154 if (r < 0)
155 return r;
157 return 0;
159 EXPORT_SYMBOL_GPL(dm_tm_pre_commit);
/*
 * Commit the transaction: discard the shadow table (next transaction must
 * shadow afresh) and then flush everything and release the superblock
 * write lock.  Not supported on a non-blocking clone.
 */
int dm_tm_commit(struct dm_transaction_manager *tm, struct dm_block *root)
{
	if (tm->is_clone)
		return -EWOULDBLOCK;

	wipe_shadow_table(tm);

	return dm_bm_flush_and_unlock(tm->bm, root);
}
EXPORT_SYMBOL_GPL(dm_tm_commit);
172 int dm_tm_new_block(struct dm_transaction_manager *tm,
173 struct dm_block_validator *v,
174 struct dm_block **result)
176 int r;
177 dm_block_t new_block;
179 if (tm->is_clone)
180 return -EWOULDBLOCK;
182 r = dm_sm_new_block(tm->sm, &new_block);
183 if (r < 0)
184 return r;
186 r = dm_bm_write_lock_zero(tm->bm, new_block, v, result);
187 if (r < 0) {
188 dm_sm_dec_block(tm->sm, new_block);
189 return r;
193 * New blocks count as shadows in that they don't need to be
194 * shadowed again.
196 insert_shadow(tm, new_block);
198 return 0;
201 static int __shadow_block(struct dm_transaction_manager *tm, dm_block_t orig,
202 struct dm_block_validator *v,
203 struct dm_block **result, int *inc_children)
205 int r;
206 dm_block_t new;
207 uint32_t count;
208 struct dm_block *orig_block;
210 r = dm_sm_new_block(tm->sm, &new);
211 if (r < 0)
212 return r;
214 r = dm_bm_write_lock_zero(tm->bm, new, v, result);
215 if (r < 0)
216 goto bad_dec_block;
218 r = dm_bm_read_lock(tm->bm, orig, v, &orig_block);
219 if (r < 0)
220 goto bad_dec_block;
222 memcpy(dm_block_data(*result), dm_block_data(orig_block),
223 dm_bm_block_size(tm->bm));
225 r = dm_bm_unlock(orig_block);
226 if (r < 0)
227 goto bad_dec_block;
229 r = dm_sm_get_count(tm->sm, orig, &count);
230 if (r < 0)
231 goto bad;
233 r = dm_sm_dec_block(tm->sm, orig);
234 if (r < 0)
235 goto bad;
237 *inc_children = count > 1;
239 return 0;
241 bad:
242 dm_bm_unlock(*result);
243 bad_dec_block:
244 dm_sm_dec_block(tm->sm, new);
246 return r;
249 int dm_tm_shadow_block(struct dm_transaction_manager *tm, dm_block_t orig,
250 struct dm_block_validator *v, struct dm_block **result,
251 int *inc_children)
253 int r, more_than_one;
255 if (tm->is_clone)
256 return -EWOULDBLOCK;
258 if (is_shadow(tm, orig)) {
259 r = dm_sm_count_is_more_than_one(tm->sm, orig, &more_than_one);
260 if (r < 0)
261 return r;
263 if (!more_than_one) {
264 *inc_children = 0;
265 return dm_bm_write_lock(tm->bm, orig, v, result);
267 /* fall through */
270 r = __shadow_block(tm, orig, v, result, inc_children);
271 if (r < 0)
272 return r;
274 insert_shadow(tm, dm_block_location(*result));
276 return r;
279 int dm_tm_read_lock(struct dm_transaction_manager *tm, dm_block_t b,
280 struct dm_block_validator *v,
281 struct dm_block **blk)
283 if (tm->is_clone)
284 return dm_bm_read_try_lock(tm->real->bm, b, v, blk);
286 return dm_bm_read_lock(tm->bm, b, v, blk);
/*
 * Drop a block lock.  The tm argument is unused here; unlocking goes
 * straight to the block manager.
 */
int dm_tm_unlock(struct dm_transaction_manager *tm, struct dm_block *b)
{
	return dm_bm_unlock(b);
}
EXPORT_SYMBOL_GPL(dm_tm_unlock);
/* Increment the reference count of block @b in the space map. */
void dm_tm_inc(struct dm_transaction_manager *tm, dm_block_t b)
{
	/*
	 * The non-blocking clone doesn't support this.
	 */
	BUG_ON(tm->is_clone);

	dm_sm_inc_block(tm->sm, b);
}
EXPORT_SYMBOL_GPL(dm_tm_inc);
/* Decrement the reference count of block @b in the space map. */
void dm_tm_dec(struct dm_transaction_manager *tm, dm_block_t b)
{
	/*
	 * The non-blocking clone doesn't support this.
	 */
	BUG_ON(tm->is_clone);

	dm_sm_dec_block(tm->sm, b);
}
316 int dm_tm_ref(struct dm_transaction_manager *tm, dm_block_t b,
317 uint32_t *result)
319 if (tm->is_clone)
320 return -EWOULDBLOCK;
322 return dm_sm_get_count(tm->sm, b, result);
/* Accessor for the underlying block manager. */
struct dm_block_manager *dm_tm_get_bm(struct dm_transaction_manager *tm)
{
	return tm->bm;
}
330 /*----------------------------------------------------------------*/
332 static int dm_tm_create_internal(struct dm_block_manager *bm,
333 dm_block_t sb_location,
334 struct dm_block_validator *sb_validator,
335 size_t root_offset, size_t root_max_len,
336 struct dm_transaction_manager **tm,
337 struct dm_space_map **sm,
338 struct dm_block **sblock,
339 int create)
341 int r;
343 *sm = dm_sm_metadata_init();
344 if (IS_ERR(*sm))
345 return PTR_ERR(*sm);
347 *tm = dm_tm_create(bm, *sm);
348 if (IS_ERR(*tm)) {
349 dm_sm_destroy(*sm);
350 return PTR_ERR(*tm);
353 if (create) {
354 r = dm_bm_write_lock_zero(dm_tm_get_bm(*tm), sb_location,
355 sb_validator, sblock);
356 if (r < 0) {
357 DMERR("couldn't lock superblock");
358 goto bad1;
361 r = dm_sm_metadata_create(*sm, *tm, dm_bm_nr_blocks(bm),
362 sb_location);
363 if (r) {
364 DMERR("couldn't create metadata space map");
365 goto bad2;
368 } else {
369 r = dm_bm_write_lock(dm_tm_get_bm(*tm), sb_location,
370 sb_validator, sblock);
371 if (r < 0) {
372 DMERR("couldn't lock superblock");
373 goto bad1;
376 r = dm_sm_metadata_open(*sm, *tm,
377 dm_block_data(*sblock) + root_offset,
378 root_max_len);
379 if (IS_ERR(*sm)) {
380 DMERR("couldn't open metadata space map");
381 goto bad2;
385 return 0;
387 bad2:
388 dm_tm_unlock(*tm, *sblock);
389 bad1:
390 dm_tm_destroy(*tm);
391 dm_sm_destroy(*sm);
392 return r;
/*
 * Create a brand new transaction manager plus metadata space map, with
 * the superblock at @sb_location zeroed and write locked in *sblock.
 */
int dm_tm_create_with_sm(struct dm_block_manager *bm, dm_block_t sb_location,
			 struct dm_block_validator *sb_validator,
			 struct dm_transaction_manager **tm,
			 struct dm_space_map **sm, struct dm_block **sblock)
{
	/* create == 1: format a fresh metadata space map */
	return dm_tm_create_internal(bm, sb_location, sb_validator,
				     0, 0, tm, sm, sblock, 1);
}
EXPORT_SYMBOL_GPL(dm_tm_create_with_sm);
/*
 * Open an existing transaction manager plus metadata space map whose
 * root is stored at @root_offset (at most @root_max_len bytes) within
 * the superblock at @sb_location, returned write locked in *sblock.
 */
int dm_tm_open_with_sm(struct dm_block_manager *bm, dm_block_t sb_location,
		       struct dm_block_validator *sb_validator,
		       size_t root_offset, size_t root_max_len,
		       struct dm_transaction_manager **tm,
		       struct dm_space_map **sm, struct dm_block **sblock)
{
	/* create == 0: open the space map already on disk */
	return dm_tm_create_internal(bm, sb_location, sb_validator, root_offset,
				     root_max_len, tm, sm, sblock, 0);
}
EXPORT_SYMBOL_GPL(dm_tm_open_with_sm);