/*
 * Copyright (C) 2011 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#include "dm-space-map-common.h"
#include "dm-transaction-manager.h"

#include <linux/bitops.h>
#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "space map common"

/*----------------------------------------------------------------*/

/*
 * Index validator.
 */
#define INDEX_CSUM_XOR 160478
static void index_prepare_for_write(struct dm_block_validator *v,
				    struct dm_block *b,
				    size_t block_size)
{
	struct disk_metadata_index *mi_le = dm_block_data(b);

	mi_le->blocknr = cpu_to_le64(dm_block_location(b));
	mi_le->csum = cpu_to_le32(dm_bm_checksum(&mi_le->padding,
						 block_size - sizeof(__le32),
						 INDEX_CSUM_XOR));
}

static int index_check(struct dm_block_validator *v,
		       struct dm_block *b,
		       size_t block_size)
{
	struct disk_metadata_index *mi_le = dm_block_data(b);
	__le32 csum_disk;

	if (dm_block_location(b) != le64_to_cpu(mi_le->blocknr)) {
		DMERR_LIMIT("index_check failed: blocknr %llu != wanted %llu",
			    le64_to_cpu(mi_le->blocknr), dm_block_location(b));
		return -ENOTBLK;
	}

	csum_disk = cpu_to_le32(dm_bm_checksum(&mi_le->padding,
					       block_size - sizeof(__le32),
					       INDEX_CSUM_XOR));
	if (csum_disk != mi_le->csum) {
		DMERR_LIMIT("index_check failed: csum %u != wanted %u",
			    le32_to_cpu(csum_disk), le32_to_cpu(mi_le->csum));
		return -EILSEQ;
	}

	return 0;
}

static struct dm_block_validator index_validator = {
	.name = "index",
	.prepare_for_write = index_prepare_for_write,
	.check = index_check
};
/*----------------------------------------------------------------*/

/*
 * Bitmap validator
 */
#define BITMAP_CSUM_XOR 240779
static void dm_bitmap_prepare_for_write(struct dm_block_validator *v,
					struct dm_block *b,
					size_t block_size)
{
	struct disk_bitmap_header *disk_header = dm_block_data(b);

	disk_header->blocknr = cpu_to_le64(dm_block_location(b));
	disk_header->csum = cpu_to_le32(dm_bm_checksum(&disk_header->not_used,
						       block_size - sizeof(__le32),
						       BITMAP_CSUM_XOR));
}

static int dm_bitmap_check(struct dm_block_validator *v,
			   struct dm_block *b,
			   size_t block_size)
{
	struct disk_bitmap_header *disk_header = dm_block_data(b);
	__le32 csum_disk;

	if (dm_block_location(b) != le64_to_cpu(disk_header->blocknr)) {
		DMERR_LIMIT("bitmap check failed: blocknr %llu != wanted %llu",
			    le64_to_cpu(disk_header->blocknr), dm_block_location(b));
		return -ENOTBLK;
	}

	csum_disk = cpu_to_le32(dm_bm_checksum(&disk_header->not_used,
					       block_size - sizeof(__le32),
					       BITMAP_CSUM_XOR));
	if (csum_disk != disk_header->csum) {
		DMERR_LIMIT("bitmap check failed: csum %u != wanted %u",
			    le32_to_cpu(csum_disk), le32_to_cpu(disk_header->csum));
		return -EILSEQ;
	}

	return 0;
}

static struct dm_block_validator dm_sm_bitmap_validator = {
	.name = "sm_bitmap",
	.prepare_for_write = dm_bitmap_prepare_for_write,
	.check = dm_bitmap_check,
};

/*----------------------------------------------------------------*/
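/*
 * Each bitmap block stores 2-bit reference counts after the
 * disk_bitmap_header, packed 32 entries to a little-endian 64-bit word.
 * Counts 0-2 are held directly in the entry; the value 3 means the real
 * count has overflowed into the ref_count btree (see sm_ll_lookup()).
 */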
#define ENTRIES_PER_WORD 32
#define ENTRIES_SHIFT	5

static void *dm_bitmap_data(struct dm_block *b)
{
	return dm_block_data(b) + sizeof(struct disk_bitmap_header);
}

#define WORD_MASK_HIGH 0xAAAAAAAAAAAAAAAAULL
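/*
 * Quick test used by sm_find_free(): returns non-zero when every 2-bit
 * entry in the 64-bit word containing entry b is non-zero, so a fully
 * used word can be skipped without inspecting its 32 entries.  The
 * add-and-mask arithmetic relies on carry propagation to flag any pair
 * that is still 00.
 */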
static unsigned dm_bitmap_word_used(void *addr, unsigned b)
{
	__le64 *words_le = addr;
	__le64 *w_le = words_le + (b >> ENTRIES_SHIFT);

	uint64_t bits = le64_to_cpu(*w_le);
	uint64_t mask = (bits + WORD_MASK_HIGH + 1) & WORD_MASK_HIGH;

	return !(~bits & mask);
}

static unsigned sm_lookup_bitmap(void *addr, unsigned b)
{
	__le64 *words_le = addr;
	__le64 *w_le = words_le + (b >> ENTRIES_SHIFT);
	unsigned hi, lo;

	b = (b & (ENTRIES_PER_WORD - 1)) << 1;
	hi = !!test_bit_le(b, (void *) w_le);
	lo = !!test_bit_le(b + 1, (void *) w_le);
	return (hi << 1) | lo;
}

static void sm_set_bitmap(void *addr, unsigned b, unsigned val)
{
	__le64 *words_le = addr;
	__le64 *w_le = words_le + (b >> ENTRIES_SHIFT);

	b = (b & (ENTRIES_PER_WORD - 1)) << 1;

	if (val & 2)
		__set_bit_le(b, (void *) w_le);
	else
		__clear_bit_le(b, (void *) w_le);

	if (val & 1)
		__set_bit_le(b + 1, (void *) w_le);
	else
		__clear_bit_le(b + 1, (void *) w_le);
}

static int sm_find_free(void *addr, unsigned begin, unsigned end,
			unsigned *result)
{
	while (begin < end) {
		if (!(begin & (ENTRIES_PER_WORD - 1)) &&
		    dm_bitmap_word_used(addr, begin)) {
			begin += ENTRIES_PER_WORD;
			continue;
		}

		if (!sm_lookup_bitmap(addr, begin)) {
			*result = begin;
			return 0;
		}

		begin++;
	}

	return -ENOSPC;
}
/*----------------------------------------------------------------*/
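/*
 * sm_ll_init() sets up the parts common to both space map formats: the
 * btree value types for index entries and overflow reference counts, and
 * the geometry derived from the block size.  Whether index entries live
 * in a btree (disk space map) or a single index block (metadata space
 * map) is decided by the load_ie/save_ie/commit hooks installed by the
 * sm_ll_new_*()/sm_ll_open_*() constructors below.
 */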
static int sm_ll_init(struct ll_disk *ll, struct dm_transaction_manager *tm)
{
	memset(ll, 0, sizeof(struct ll_disk));

	ll->tm = tm;

	ll->bitmap_info.tm = tm;
	ll->bitmap_info.levels = 1;

	/*
	 * Because the new bitmap blocks are created via a shadow
	 * operation, the old entry has already had its reference count
	 * decremented and we don't need the btree to do any bookkeeping.
	 */
	ll->bitmap_info.value_type.size = sizeof(struct disk_index_entry);
	ll->bitmap_info.value_type.inc = NULL;
	ll->bitmap_info.value_type.dec = NULL;
	ll->bitmap_info.value_type.equal = NULL;

	ll->ref_count_info.tm = tm;
	ll->ref_count_info.levels = 1;
	ll->ref_count_info.value_type.size = sizeof(uint32_t);
	ll->ref_count_info.value_type.inc = NULL;
	ll->ref_count_info.value_type.dec = NULL;
	ll->ref_count_info.value_type.equal = NULL;

	ll->block_size = dm_bm_block_size(dm_tm_get_bm(tm));

	if (ll->block_size > (1 << 30)) {
		DMERR("block size too big to hold bitmaps");
		return -EINVAL;
	}

	ll->entries_per_block = (ll->block_size - sizeof(struct disk_bitmap_header)) *
		ENTRIES_PER_BYTE;
	ll->nr_blocks = 0;
	ll->bitmap_root = 0;
	ll->ref_count_root = 0;
	ll->bitmap_index_changed = false;

	return 0;
}
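/*
 * Growing the space map: one new bitmap block is allocated for each
 * ll->entries_per_block blocks added, and an index entry recording it as
 * completely free is saved for each.
 */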
int sm_ll_extend(struct ll_disk *ll, dm_block_t extra_blocks)
{
	int r;
	dm_block_t i, nr_blocks, nr_indexes;
	unsigned old_blocks, blocks;

	nr_blocks = ll->nr_blocks + extra_blocks;
	old_blocks = dm_sector_div_up(ll->nr_blocks, ll->entries_per_block);
	blocks = dm_sector_div_up(nr_blocks, ll->entries_per_block);

	nr_indexes = dm_sector_div_up(nr_blocks, ll->entries_per_block);
	if (nr_indexes > ll->max_entries(ll)) {
		DMERR("space map too large");
		return -EINVAL;
	}

	/*
	 * We need to set this before the dm_tm_new_block() call below.
	 */
	ll->nr_blocks = nr_blocks;
	for (i = old_blocks; i < blocks; i++) {
		struct dm_block *b;
		struct disk_index_entry idx;

		r = dm_tm_new_block(ll->tm, &dm_sm_bitmap_validator, &b);
		if (r < 0)
			return r;

		idx.blocknr = cpu_to_le64(dm_block_location(b));

		dm_tm_unlock(ll->tm, b);

		idx.nr_free = cpu_to_le32(ll->entries_per_block);
		idx.none_free_before = 0;

		r = ll->save_ie(ll, i, &idx);
		if (r < 0)
			return r;
	}

	return 0;
}
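/*
 * To look up a count, the block number is split into an index entry
 * number (which bitmap block) and a bit offset within that bitmap.
 */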
int sm_ll_lookup_bitmap(struct ll_disk *ll, dm_block_t b, uint32_t *result)
{
	int r;
	dm_block_t index = b;
	struct disk_index_entry ie_disk;
	struct dm_block *blk;

	b = do_div(index, ll->entries_per_block);
	r = ll->load_ie(ll, index, &ie_disk);
	if (r < 0)
		return r;

	r = dm_tm_read_lock(ll->tm, le64_to_cpu(ie_disk.blocknr),
			    &dm_sm_bitmap_validator, &blk);
	if (r < 0)
		return r;

	*result = sm_lookup_bitmap(dm_bitmap_data(blk), b);

	dm_tm_unlock(ll->tm, blk);

	return 0;
}
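/*
 * sm_ll_lookup() below tries the cheap bitmap lookup first and only
 * consults the ref_count btree when it sees the overflow value 3.
 */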
static int sm_ll_lookup_big_ref_count(struct ll_disk *ll, dm_block_t b,
				      uint32_t *result)
{
	__le32 le_rc;
	int r;

	r = dm_btree_lookup(&ll->ref_count_info, ll->ref_count_root, &b, &le_rc);
	if (r < 0)
		return r;

	*result = le32_to_cpu(le_rc);

	return r;
}

int sm_ll_lookup(struct ll_disk *ll, dm_block_t b, uint32_t *result)
{
	int r = sm_ll_lookup_bitmap(ll, b, result);

	if (r)
		return r;

	if (*result != 3)
		return r;

	return sm_ll_lookup_big_ref_count(ll, b, result);
}
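/*
 * Free-block search walks the index entries between begin and end, using
 * the nr_free and none_free_before hints to skip bitmaps that have
 * nothing to offer.
 */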
int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
			  dm_block_t end, dm_block_t *result)
{
	int r;
	struct disk_index_entry ie_disk;
	dm_block_t i, index_begin = begin;
	dm_block_t index_end = dm_sector_div_up(end, ll->entries_per_block);

	/*
	 * FIXME: Use shifts
	 */
	begin = do_div(index_begin, ll->entries_per_block);
	end = do_div(end, ll->entries_per_block);

	for (i = index_begin; i < index_end; i++, begin = 0) {
		struct dm_block *blk;
		unsigned position;
		uint32_t bit_end;

		r = ll->load_ie(ll, i, &ie_disk);
		if (r < 0)
			return r;

		if (le32_to_cpu(ie_disk.nr_free) == 0)
			continue;

		r = dm_tm_read_lock(ll->tm, le64_to_cpu(ie_disk.blocknr),
				    &dm_sm_bitmap_validator, &blk);
		if (r < 0)
			return r;

		bit_end = (i == index_end - 1) ? end : ll->entries_per_block;

		r = sm_find_free(dm_bitmap_data(blk),
				 max_t(unsigned, begin, le32_to_cpu(ie_disk.none_free_before)),
				 bit_end, &position);
		if (r == -ENOSPC) {
			/*
			 * This might happen because we started searching
			 * part way through the bitmap.
			 */
			dm_tm_unlock(ll->tm, blk);
			continue;
		}

		dm_tm_unlock(ll->tm, blk);

		*result = i * ll->entries_per_block + (dm_block_t) position;
		return 0;
	}

	return -ENOSPC;
}
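/*
 * Finds a block that is free in the new transaction and, as the "double
 * check" below confirms, was also unused in the old transaction.
 */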
int sm_ll_find_common_free_block(struct ll_disk *old_ll, struct ll_disk *new_ll,
				 dm_block_t begin, dm_block_t end, dm_block_t *b)
{
	int r;
	uint32_t count;

	do {
		r = sm_ll_find_free_block(new_ll, begin, new_ll->nr_blocks, b);
		if (r)
			break;

		/* double check this block wasn't used in the old transaction */
		if (*b >= old_ll->nr_blocks)
			count = 0;
		else {
			r = sm_ll_lookup(old_ll, *b, &count);
			if (r)
				break;

			if (count)
				begin = *b + 1;
		}
	} while (count);

	return r;
}
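/*
 * sm_ll_mutate() is the engine behind insert/inc/dec: it shadows the
 * bitmap block, applies the mutator to the old count, writes the result
 * either into the 2-bit entry (counts 0-2) or into the ref_count btree
 * (counts above 2, entry set to 3), then updates the index entry's
 * nr_free/none_free_before hints and reports the allocation event.
 */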
static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b,
			int (*mutator)(void *context, uint32_t old, uint32_t *new),
			void *context, enum allocation_event *ev)
{
	int r;
	uint32_t bit, old, ref_count;
	struct dm_block *nb;
	dm_block_t index = b;
	struct disk_index_entry ie_disk;
	void *bm_le;
	int inc;

	bit = do_div(index, ll->entries_per_block);
	r = ll->load_ie(ll, index, &ie_disk);
	if (r < 0)
		return r;

	r = dm_tm_shadow_block(ll->tm, le64_to_cpu(ie_disk.blocknr),
			       &dm_sm_bitmap_validator, &nb, &inc);
	if (r < 0) {
		DMERR("dm_tm_shadow_block() failed");
		return r;
	}
	ie_disk.blocknr = cpu_to_le64(dm_block_location(nb));

	bm_le = dm_bitmap_data(nb);
	old = sm_lookup_bitmap(bm_le, bit);

	if (old > 2) {
		r = sm_ll_lookup_big_ref_count(ll, b, &old);
		if (r < 0) {
			dm_tm_unlock(ll->tm, nb);
			return r;
		}
	}

	r = mutator(context, old, &ref_count);
	if (r) {
		dm_tm_unlock(ll->tm, nb);
		return r;
	}

	if (ref_count <= 2) {
		sm_set_bitmap(bm_le, bit, ref_count);

		dm_tm_unlock(ll->tm, nb);

		if (old > 2) {
			r = dm_btree_remove(&ll->ref_count_info,
					    ll->ref_count_root,
					    &b, &ll->ref_count_root);
			if (r)
				return r;
		}

	} else {
		__le32 le_rc = cpu_to_le32(ref_count);

		sm_set_bitmap(bm_le, bit, 3);
		dm_tm_unlock(ll->tm, nb);

		__dm_bless_for_disk(&le_rc);
		r = dm_btree_insert(&ll->ref_count_info, ll->ref_count_root,
				    &b, &le_rc, &ll->ref_count_root);
		if (r < 0) {
			DMERR("ref count insert failed");
			return r;
		}
	}

	if (ref_count && !old) {
		*ev = SM_ALLOC;
		ll->nr_allocated++;
		le32_add_cpu(&ie_disk.nr_free, -1);
		if (le32_to_cpu(ie_disk.none_free_before) == bit)
			ie_disk.none_free_before = cpu_to_le32(bit + 1);

	} else if (old && !ref_count) {
		*ev = SM_FREE;
		ll->nr_allocated--;
		le32_add_cpu(&ie_disk.nr_free, 1);
		ie_disk.none_free_before = cpu_to_le32(min(le32_to_cpu(ie_disk.none_free_before), bit));
	} else
		*ev = SM_NONE;

	return ll->save_ie(ll, index, &ie_disk);
}
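/*
 * The exported insert/inc/dec operations are thin wrappers that feed the
 * appropriate mutator to sm_ll_mutate().
 */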
static int set_ref_count(void *context, uint32_t old, uint32_t *new)
{
	*new = *((uint32_t *) context);
	return 0;
}

int sm_ll_insert(struct ll_disk *ll, dm_block_t b,
		 uint32_t ref_count, enum allocation_event *ev)
{
	return sm_ll_mutate(ll, b, set_ref_count, &ref_count, ev);
}

static int inc_ref_count(void *context, uint32_t old, uint32_t *new)
{
	*new = old + 1;
	return 0;
}

int sm_ll_inc(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev)
{
	return sm_ll_mutate(ll, b, inc_ref_count, NULL, ev);
}

static int dec_ref_count(void *context, uint32_t old, uint32_t *new)
{
	if (!old) {
		DMERR_LIMIT("unable to decrement a reference count below 0");
		return -EINVAL;
	}

	*new = old - 1;
	return 0;
}

int sm_ll_dec(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev)
{
	return sm_ll_mutate(ll, b, dec_ref_count, NULL, ev);
}
int sm_ll_commit(struct ll_disk *ll)
{
	int r = 0;

	if (ll->bitmap_index_changed) {
		r = ll->commit(ll);
		if (!r)
			ll->bitmap_index_changed = false;
	}

	return r;
}

/*----------------------------------------------------------------*/
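/*
 * Metadata space map: every index entry fits in a single index block,
 * cached in core in ll->mi_le and written back on commit by shadowing
 * the block at ll->bitmap_root.
 */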
static int metadata_ll_load_ie(struct ll_disk *ll, dm_block_t index,
			       struct disk_index_entry *ie)
{
	memcpy(ie, ll->mi_le.index + index, sizeof(*ie));
	return 0;
}

static int metadata_ll_save_ie(struct ll_disk *ll, dm_block_t index,
			       struct disk_index_entry *ie)
{
	ll->bitmap_index_changed = true;
	memcpy(ll->mi_le.index + index, ie, sizeof(*ie));
	return 0;
}

static int metadata_ll_init_index(struct ll_disk *ll)
{
	int r;
	struct dm_block *b;

	r = dm_tm_new_block(ll->tm, &index_validator, &b);
	if (r < 0)
		return r;

	ll->bitmap_root = dm_block_location(b);

	dm_tm_unlock(ll->tm, b);

	return 0;
}

static int metadata_ll_open(struct ll_disk *ll)
{
	int r;
	struct dm_block *block;

	r = dm_tm_read_lock(ll->tm, ll->bitmap_root,
			    &index_validator, &block);
	if (r)
		return r;

	memcpy(&ll->mi_le, dm_block_data(block), sizeof(ll->mi_le));
	dm_tm_unlock(ll->tm, block);

	return 0;
}

static dm_block_t metadata_ll_max_entries(struct ll_disk *ll)
{
	return MAX_METADATA_BITMAPS;
}

static int metadata_ll_commit(struct ll_disk *ll)
{
	int r, inc;
	struct dm_block *b;

	r = dm_tm_shadow_block(ll->tm, ll->bitmap_root, &index_validator, &b, &inc);
	if (r)
		return r;

	memcpy(dm_block_data(b), &ll->mi_le, sizeof(ll->mi_le));
	ll->bitmap_root = dm_block_location(b);

	dm_tm_unlock(ll->tm, b);

	return 0;
}
int sm_ll_new_metadata(struct ll_disk *ll, struct dm_transaction_manager *tm)
{
	int r;

	r = sm_ll_init(ll, tm);
	if (r < 0)
		return r;

	ll->load_ie = metadata_ll_load_ie;
	ll->save_ie = metadata_ll_save_ie;
	ll->init_index = metadata_ll_init_index;
	ll->open_index = metadata_ll_open;
	ll->max_entries = metadata_ll_max_entries;
	ll->commit = metadata_ll_commit;

	ll->nr_blocks = 0;
	ll->nr_allocated = 0;

	r = ll->init_index(ll);
	if (r < 0)
		return r;

	r = dm_btree_empty(&ll->ref_count_info, &ll->ref_count_root);
	if (r < 0)
		return r;

	return 0;
}
int sm_ll_open_metadata(struct ll_disk *ll, struct dm_transaction_manager *tm,
			void *root_le, size_t len)
{
	int r;
	struct disk_sm_root smr;

	if (len < sizeof(struct disk_sm_root)) {
		DMERR("sm_metadata root too small");
		return -ENOMEM;
	}

	/*
	 * We don't know the alignment of the root_le buffer, so need to
	 * copy into a new structure.
	 */
	memcpy(&smr, root_le, sizeof(smr));

	r = sm_ll_init(ll, tm);
	if (r < 0)
		return r;

	ll->load_ie = metadata_ll_load_ie;
	ll->save_ie = metadata_ll_save_ie;
	ll->init_index = metadata_ll_init_index;
	ll->open_index = metadata_ll_open;
	ll->max_entries = metadata_ll_max_entries;
	ll->commit = metadata_ll_commit;

	ll->nr_blocks = le64_to_cpu(smr.nr_blocks);
	ll->nr_allocated = le64_to_cpu(smr.nr_allocated);
	ll->bitmap_root = le64_to_cpu(smr.bitmap_root);
	ll->ref_count_root = le64_to_cpu(smr.ref_count_root);

	return ll->open_index(ll);
}

/*----------------------------------------------------------------*/
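/*
 * Disk space map: index entries are kept in a btree rooted at
 * ll->bitmap_root rather than in one index block, so there is no fixed
 * limit on the number of bitmaps (disk_ll_max_entries() returns -1ULL).
 */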
static int disk_ll_load_ie(struct ll_disk *ll, dm_block_t index,
			   struct disk_index_entry *ie)
{
	return dm_btree_lookup(&ll->bitmap_info, ll->bitmap_root, &index, ie);
}

static int disk_ll_save_ie(struct ll_disk *ll, dm_block_t index,
			   struct disk_index_entry *ie)
{
	__dm_bless_for_disk(ie);
	return dm_btree_insert(&ll->bitmap_info, ll->bitmap_root,
			       &index, ie, &ll->bitmap_root);
}

static int disk_ll_init_index(struct ll_disk *ll)
{
	return dm_btree_empty(&ll->bitmap_info, &ll->bitmap_root);
}

static int disk_ll_open(struct ll_disk *ll)
{
	/* nothing to do */
	return 0;
}

static dm_block_t disk_ll_max_entries(struct ll_disk *ll)
{
	return -1ULL;
}

static int disk_ll_commit(struct ll_disk *ll)
{
	return 0;
}
int sm_ll_new_disk(struct ll_disk *ll, struct dm_transaction_manager *tm)
{
	int r;

	r = sm_ll_init(ll, tm);
	if (r < 0)
		return r;

	ll->load_ie = disk_ll_load_ie;
	ll->save_ie = disk_ll_save_ie;
	ll->init_index = disk_ll_init_index;
	ll->open_index = disk_ll_open;
	ll->max_entries = disk_ll_max_entries;
	ll->commit = disk_ll_commit;

	ll->nr_blocks = 0;
	ll->nr_allocated = 0;

	r = ll->init_index(ll);
	if (r < 0)
		return r;

	r = dm_btree_empty(&ll->ref_count_info, &ll->ref_count_root);
	if (r < 0)
		return r;

	return 0;
}
int sm_ll_open_disk(struct ll_disk *ll, struct dm_transaction_manager *tm,
		    void *root_le, size_t len)
{
	int r;
	struct disk_sm_root *smr = root_le;

	if (len < sizeof(struct disk_sm_root)) {
		DMERR("sm_metadata root too small");
		return -ENOMEM;
	}

	r = sm_ll_init(ll, tm);
	if (r < 0)
		return r;

	ll->load_ie = disk_ll_load_ie;
	ll->save_ie = disk_ll_save_ie;
	ll->init_index = disk_ll_init_index;
	ll->open_index = disk_ll_open;
	ll->max_entries = disk_ll_max_entries;
	ll->commit = disk_ll_commit;

	ll->nr_blocks = le64_to_cpu(smr->nr_blocks);
	ll->nr_allocated = le64_to_cpu(smr->nr_allocated);
	ll->bitmap_root = le64_to_cpu(smr->bitmap_root);
	ll->ref_count_root = le64_to_cpu(smr->ref_count_root);

	return ll->open_index(ll);
}

/*----------------------------------------------------------------*/