drivers/md/persistent-data/dm-space-map-checker.c

/*
 * Copyright (C) 2011 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#include "dm-space-map-checker.h"

#include <linux/device-mapper.h>
#include <linux/export.h>
#include <linux/slab.h>

#ifdef CONFIG_DM_DEBUG_SPACE_MAPS

#define DM_MSG_PREFIX "space map checker"

/*----------------------------------------------------------------*/

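/*
 * In-core shadow of the reference counts held by the space map being
 * checked.  nr is the total number of blocks, nr_free the number of blocks
 * with a zero count, and counts[] holds one reference count per block.
 */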
struct count_array {
        dm_block_t nr;
        dm_block_t nr_free;

        uint32_t *counts;
};

static int ca_get_count(struct count_array *ca, dm_block_t b, uint32_t *count)
{
        if (b >= ca->nr)
                return -EINVAL;

        *count = ca->counts[b];
        return 0;
}

static int ca_count_more_than_one(struct count_array *ca, dm_block_t b, int *r)
{
        if (b >= ca->nr)
                return -EINVAL;

        *r = ca->counts[b] > 1;
        return 0;
}

static int ca_set_count(struct count_array *ca, dm_block_t b, uint32_t count)
{
        uint32_t old_count;

        if (b >= ca->nr)
                return -EINVAL;

        old_count = ca->counts[b];

        if (!count && old_count)
                ca->nr_free++;
        else if (count && !old_count)
                ca->nr_free--;

        ca->counts[b] = count;
        return 0;
}

static int ca_inc_block(struct count_array *ca, dm_block_t b)
{
        if (b >= ca->nr)
                return -EINVAL;

        ca_set_count(ca, b, ca->counts[b] + 1);
        return 0;
}

static int ca_dec_block(struct count_array *ca, dm_block_t b)
{
        if (b >= ca->nr)
                return -EINVAL;

        BUG_ON(ca->counts[b] == 0);
        ca_set_count(ca, b, ca->counts[b] - 1);
        return 0;
}

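/*
 * ca_create() sizes the shadow array to match the wrapped space map and
 * zeroes it; ca_load() then reads the reference count of every block from
 * the real space map, hence the warning below that it may take some time.
 */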
static int ca_create(struct count_array *ca, struct dm_space_map *sm)
{
        int r;
        dm_block_t nr_blocks;

        r = dm_sm_get_nr_blocks(sm, &nr_blocks);
        if (r)
                return r;

        ca->nr = nr_blocks;
        ca->nr_free = nr_blocks;
        ca->counts = kzalloc(sizeof(*ca->counts) * nr_blocks, GFP_KERNEL);
        if (!ca->counts)
                return -ENOMEM;

        return 0;
}

static int ca_load(struct count_array *ca, struct dm_space_map *sm)
{
        int r;
        uint32_t count;
        dm_block_t nr_blocks, i;

        r = dm_sm_get_nr_blocks(sm, &nr_blocks);
        if (r)
                return r;

        BUG_ON(ca->nr != nr_blocks);

        DMWARN("Loading debug space map from disk. This may take some time");
        for (i = 0; i < nr_blocks; i++) {
                r = dm_sm_get_count(sm, i, &count);
                if (r) {
                        DMERR("load failed");
                        return r;
                }

                ca_set_count(ca, i, count);
        }
        DMWARN("Load complete");

        return 0;
}

static int ca_extend(struct count_array *ca, dm_block_t extra_blocks)
{
        dm_block_t nr_blocks = ca->nr + extra_blocks;
        uint32_t *counts = kzalloc(sizeof(*counts) * nr_blocks, GFP_KERNEL);
        if (!counts)
                return -ENOMEM;

        memcpy(counts, ca->counts, sizeof(*counts) * ca->nr);
        kfree(ca->counts);
        ca->nr = nr_blocks;
        ca->nr_free += extra_blocks;
        ca->counts = counts;
        return 0;
}

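/*
 * Committing copies the working counts over the committed snapshot.  If the
 * working array has grown (the space map was extended since the last
 * commit), the snapshot is extended first so the two stay the same size.
 */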
static int ca_commit(struct count_array *old, struct count_array *new)
{
        if (old->nr != new->nr) {
                BUG_ON(old->nr > new->nr);
                ca_extend(old, new->nr - old->nr);
        }

        BUG_ON(old->nr != new->nr);
        old->nr_free = new->nr_free;
        memcpy(old->counts, new->counts, sizeof(*old->counts) * old->nr);
        return 0;
}

static void ca_destroy(struct count_array *ca)
{
        kfree(ca->counts);
}

/*----------------------------------------------------------------*/

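/*
 * The checker embeds a struct dm_space_map so it can be used wherever a
 * space map is expected, and keeps two shadow arrays: 'counts' mirrors the
 * current (uncommitted) state of the real space map, while 'old_counts'
 * holds the state as of the last commit.
 */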
struct sm_checker {
        struct dm_space_map sm;

        struct count_array old_counts;
        struct count_array counts;

        struct dm_space_map *real_sm;
};

static void sm_checker_destroy(struct dm_space_map *sm)
{
        struct sm_checker *smc = container_of(sm, struct sm_checker, sm);

        dm_sm_destroy(smc->real_sm);
        ca_destroy(&smc->old_counts);
        ca_destroy(&smc->counts);
        kfree(smc);
}

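/*
 * Each wrapper below forwards the operation to the real space map and then
 * repeats it against the shadow counts.  Any disagreement in return codes
 * or resulting counts indicates a bug in the space map implementation and
 * triggers BUG_ON().
 */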
static int sm_checker_get_nr_blocks(struct dm_space_map *sm, dm_block_t *count)
{
        struct sm_checker *smc = container_of(sm, struct sm_checker, sm);
        int r = dm_sm_get_nr_blocks(smc->real_sm, count);
        if (!r)
                BUG_ON(smc->old_counts.nr != *count);
        return r;
}

static int sm_checker_get_nr_free(struct dm_space_map *sm, dm_block_t *count)
{
        struct sm_checker *smc = container_of(sm, struct sm_checker, sm);
        int r = dm_sm_get_nr_free(smc->real_sm, count);
        if (!r) {
                /*
                 * Slow, but we know it's correct.
                 */
                dm_block_t b, n = 0;
                for (b = 0; b < smc->old_counts.nr; b++)
                        if (smc->old_counts.counts[b] == 0 &&
                            smc->counts.counts[b] == 0)
                                n++;

                if (n != *count)
                        DMERR("free block counts differ, checker %u, sm-disk:%u",
                              (unsigned) n, (unsigned) *count);
        }
        return r;
}

static int sm_checker_new_block(struct dm_space_map *sm, dm_block_t *b)
{
        struct sm_checker *smc = container_of(sm, struct sm_checker, sm);
        int r = dm_sm_new_block(smc->real_sm, b);

        if (!r) {
                BUG_ON(*b >= smc->old_counts.nr);
                BUG_ON(smc->old_counts.counts[*b] != 0);
                BUG_ON(*b >= smc->counts.nr);
                BUG_ON(smc->counts.counts[*b] != 0);
                ca_set_count(&smc->counts, *b, 1);
        }

        return r;
}

static int sm_checker_inc_block(struct dm_space_map *sm, dm_block_t b)
{
        struct sm_checker *smc = container_of(sm, struct sm_checker, sm);
        int r = dm_sm_inc_block(smc->real_sm, b);
        int r2 = ca_inc_block(&smc->counts, b);
        BUG_ON(r != r2);
        return r;
}

static int sm_checker_dec_block(struct dm_space_map *sm, dm_block_t b)
{
        struct sm_checker *smc = container_of(sm, struct sm_checker, sm);
        int r = dm_sm_dec_block(smc->real_sm, b);
        int r2 = ca_dec_block(&smc->counts, b);
        BUG_ON(r != r2);
        return r;
}

static int sm_checker_get_count(struct dm_space_map *sm, dm_block_t b, uint32_t *result)
{
        struct sm_checker *smc = container_of(sm, struct sm_checker, sm);
        uint32_t result2 = 0;
        int r = dm_sm_get_count(smc->real_sm, b, result);
        int r2 = ca_get_count(&smc->counts, b, &result2);

        BUG_ON(r != r2);
        if (!r)
                BUG_ON(*result != result2);
        return r;
}

static int sm_checker_count_more_than_one(struct dm_space_map *sm, dm_block_t b, int *result)
{
        struct sm_checker *smc = container_of(sm, struct sm_checker, sm);
        int result2 = 0;
        int r = dm_sm_count_is_more_than_one(smc->real_sm, b, result);
        int r2 = ca_count_more_than_one(&smc->counts, b, &result2);

        BUG_ON(r != r2);
        if (!r)
                BUG_ON(!(*result) && result2);
        return r;
}

static int sm_checker_set_count(struct dm_space_map *sm, dm_block_t b, uint32_t count)
{
        struct sm_checker *smc = container_of(sm, struct sm_checker, sm);
        uint32_t old_rc;
        int r = dm_sm_set_count(smc->real_sm, b, count);
        int r2;

        BUG_ON(b >= smc->counts.nr);
        old_rc = smc->counts.counts[b];
        r2 = ca_set_count(&smc->counts, b, count);
        BUG_ON(r != r2);

        return r;
}

static int sm_checker_commit(struct dm_space_map *sm)
{
        struct sm_checker *smc = container_of(sm, struct sm_checker, sm);
        int r;

        r = dm_sm_commit(smc->real_sm);
        if (r)
                return r;

        r = ca_commit(&smc->old_counts, &smc->counts);
        if (r)
                return r;

        return 0;
}

static int sm_checker_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
{
        struct sm_checker *smc = container_of(sm, struct sm_checker, sm);
        int r = dm_sm_extend(smc->real_sm, extra_blocks);
        if (r)
                return r;

        return ca_extend(&smc->counts, extra_blocks);
}

static int sm_checker_root_size(struct dm_space_map *sm, size_t *result)
{
        struct sm_checker *smc = container_of(sm, struct sm_checker, sm);
        return dm_sm_root_size(smc->real_sm, result);
}

static int sm_checker_copy_root(struct dm_space_map *sm, void *copy_to_here_le, size_t len)
{
        struct sm_checker *smc = container_of(sm, struct sm_checker, sm);
        return dm_sm_copy_root(smc->real_sm, copy_to_here_le, len);
}

/*----------------------------------------------------------------*/

static struct dm_space_map ops_ = {
        .destroy = sm_checker_destroy,
        .get_nr_blocks = sm_checker_get_nr_blocks,
        .get_nr_free = sm_checker_get_nr_free,
        .inc_block = sm_checker_inc_block,
        .dec_block = sm_checker_dec_block,
        .new_block = sm_checker_new_block,
        .get_count = sm_checker_get_count,
        .count_is_more_than_one = sm_checker_count_more_than_one,
        .set_count = sm_checker_set_count,
        .commit = sm_checker_commit,
        .extend = sm_checker_extend,
        .root_size = sm_checker_root_size,
        .copy_root = sm_checker_copy_root
};

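/*
 * Wrap an existing, already populated space map.  The shadow counts are
 * loaded from the real space map and an initial committed snapshot is
 * taken.  On any failure the partially built checker is freed and NULL is
 * returned; the wrapped space map itself is not destroyed.
 *
 * A caller would typically substitute the wrapper for its own pointer,
 * e.g. (sketch only, 'real_sm' is a hypothetical caller-owned space map):
 *
 *      struct dm_space_map *checked = dm_sm_checker_create(real_sm);
 *      if (!checked)
 *              -- handle the error; real_sm is still valid --
 */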
struct dm_space_map *dm_sm_checker_create(struct dm_space_map *sm)
{
        int r;
        struct sm_checker *smc;

        if (!sm)
                return NULL;

        smc = kmalloc(sizeof(*smc), GFP_KERNEL);
        if (!smc)
                return NULL;

        memcpy(&smc->sm, &ops_, sizeof(smc->sm));
        r = ca_create(&smc->old_counts, sm);
        if (r) {
                kfree(smc);
                return NULL;
        }

        r = ca_create(&smc->counts, sm);
        if (r) {
                ca_destroy(&smc->old_counts);
                kfree(smc);
                return NULL;
        }

        smc->real_sm = sm;

        r = ca_load(&smc->counts, sm);
        if (r) {
                ca_destroy(&smc->counts);
                ca_destroy(&smc->old_counts);
                kfree(smc);
                return NULL;
        }

        r = ca_commit(&smc->old_counts, &smc->counts);
        if (r) {
                ca_destroy(&smc->counts);
                ca_destroy(&smc->old_counts);
                kfree(smc);
                return NULL;
        }

        return &smc->sm;
}
EXPORT_SYMBOL_GPL(dm_sm_checker_create);

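/*
 * Variant for a freshly created space map: there are no existing counts to
 * load, so the shadow arrays simply start out all zero.
 */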
struct dm_space_map *dm_sm_checker_create_fresh(struct dm_space_map *sm)
{
        int r;
        struct sm_checker *smc;

        if (!sm)
                return NULL;

        smc = kmalloc(sizeof(*smc), GFP_KERNEL);
        if (!smc)
                return NULL;

        memcpy(&smc->sm, &ops_, sizeof(smc->sm));
        r = ca_create(&smc->old_counts, sm);
        if (r) {
                kfree(smc);
                return NULL;
        }

        r = ca_create(&smc->counts, sm);
        if (r) {
                ca_destroy(&smc->old_counts);
                kfree(smc);
                return NULL;
        }

        smc->real_sm = sm;
        return &smc->sm;
}
EXPORT_SYMBOL_GPL(dm_sm_checker_create_fresh);

/*----------------------------------------------------------------*/

#else

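/*
 * With CONFIG_DM_DEBUG_SPACE_MAPS disabled the create functions are
 * pass-throughs: the caller gets the original space map back and no
 * checking overhead is incurred.
 */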
struct dm_space_map *dm_sm_checker_create(struct dm_space_map *sm)
{
        return sm;
}
EXPORT_SYMBOL_GPL(dm_sm_checker_create);

struct dm_space_map *dm_sm_checker_create_fresh(struct dm_space_map *sm)
{
        return sm;
}
EXPORT_SYMBOL_GPL(dm_sm_checker_create_fresh);

/*----------------------------------------------------------------*/

#endif