drivers/md/persistent-data/dm-space-map-common.c

/*
 * Copyright (C) 2011 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#include "dm-space-map-common.h"
#include "dm-transaction-manager.h"

#include <linux/bitops.h>
#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "space map common"

/*----------------------------------------------------------------*/

/*
 * Index validator.
 */
#define INDEX_CSUM_XOR 160478

static void index_prepare_for_write(struct dm_block_validator *v,
				    struct dm_block *b,
				    size_t block_size)
{
	struct disk_metadata_index *mi_le = dm_block_data(b);

	mi_le->blocknr = cpu_to_le64(dm_block_location(b));
	mi_le->csum = cpu_to_le32(dm_bm_checksum(&mi_le->padding,
						 block_size - sizeof(__le32),
						 INDEX_CSUM_XOR));
}

static int index_check(struct dm_block_validator *v,
		       struct dm_block *b,
		       size_t block_size)
{
	struct disk_metadata_index *mi_le = dm_block_data(b);
	__le32 csum_disk;

	if (dm_block_location(b) != le64_to_cpu(mi_le->blocknr)) {
		DMERR("index_check failed blocknr %llu wanted %llu",
		      le64_to_cpu(mi_le->blocknr), dm_block_location(b));
		return -ENOTBLK;
	}

	csum_disk = cpu_to_le32(dm_bm_checksum(&mi_le->padding,
					       block_size - sizeof(__le32),
					       INDEX_CSUM_XOR));
	if (csum_disk != mi_le->csum) {
		DMERR("index_check failed csum %u wanted %u",
		      le32_to_cpu(csum_disk), le32_to_cpu(mi_le->csum));
		return -EILSEQ;
	}

	return 0;
}

static struct dm_block_validator index_validator = {
	.name = "index",
	.prepare_for_write = index_prepare_for_write,
	.check = index_check
};

/*----------------------------------------------------------------*/

/*
 * Bitmap validator
 */
#define BITMAP_CSUM_XOR 240779

static void bitmap_prepare_for_write(struct dm_block_validator *v,
				     struct dm_block *b,
				     size_t block_size)
{
	struct disk_bitmap_header *disk_header = dm_block_data(b);

	disk_header->blocknr = cpu_to_le64(dm_block_location(b));
	disk_header->csum = cpu_to_le32(dm_bm_checksum(&disk_header->not_used,
						       block_size - sizeof(__le32),
						       BITMAP_CSUM_XOR));
}

static int bitmap_check(struct dm_block_validator *v,
			struct dm_block *b,
			size_t block_size)
{
	struct disk_bitmap_header *disk_header = dm_block_data(b);
	__le32 csum_disk;

	if (dm_block_location(b) != le64_to_cpu(disk_header->blocknr)) {
		DMERR("bitmap check failed blocknr %llu wanted %llu",
		      le64_to_cpu(disk_header->blocknr), dm_block_location(b));
		return -ENOTBLK;
	}

	csum_disk = cpu_to_le32(dm_bm_checksum(&disk_header->not_used,
					       block_size - sizeof(__le32),
					       BITMAP_CSUM_XOR));
	if (csum_disk != disk_header->csum) {
		DMERR("bitmap check failed csum %u wanted %u",
		      le32_to_cpu(csum_disk), le32_to_cpu(disk_header->csum));
		return -EILSEQ;
	}

	return 0;
}

static struct dm_block_validator dm_sm_bitmap_validator = {
	.name = "sm_bitmap",
	.prepare_for_write = bitmap_prepare_for_write,
	.check = bitmap_check
};

/*----------------------------------------------------------------*/

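/*
 * Each bitmap block stores two bits per managed block, immediately after
 * the disk_bitmap_header.  Reference counts of 0, 1 or 2 are held
 * directly in those two bits; the value 3 means the real count has
 * overflowed into the ref_count btree.  With two bits per entry a 64-bit
 * word holds 32 entries.
 */
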
#define ENTRIES_PER_WORD 32
#define ENTRIES_SHIFT 5

static void *dm_bitmap_data(struct dm_block *b)
{
	return dm_block_data(b) + sizeof(struct disk_bitmap_header);
}

#define WORD_MASK_HIGH 0xAAAAAAAAAAAAAAAAULL

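/*
 * Quick test used by sm_find_free(): returns non-zero when none of the
 * 32 two-bit entries in the word containing entry 'b' is zero (free),
 * allowing the search to skip the whole word.
 */
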
static unsigned bitmap_word_used(void *addr, unsigned b)
{
	__le64 *words_le = addr;
	__le64 *w_le = words_le + (b >> ENTRIES_SHIFT);

	uint64_t bits = le64_to_cpu(*w_le);
	uint64_t mask = (bits + WORD_MASK_HIGH + 1) & WORD_MASK_HIGH;

	return !(~bits & mask);
}

static unsigned sm_lookup_bitmap(void *addr, unsigned b)
{
	__le64 *words_le = addr;
	__le64 *w_le = words_le + (b >> ENTRIES_SHIFT);
	unsigned hi, lo;

	b = (b & (ENTRIES_PER_WORD - 1)) << 1;
	hi = !!test_bit_le(b, (void *) w_le);
	lo = !!test_bit_le(b + 1, (void *) w_le);
	return (hi << 1) | lo;
}

static void sm_set_bitmap(void *addr, unsigned b, unsigned val)
{
	__le64 *words_le = addr;
	__le64 *w_le = words_le + (b >> ENTRIES_SHIFT);

	b = (b & (ENTRIES_PER_WORD - 1)) << 1;

	if (val & 2)
		__set_bit_le(b, (void *) w_le);
	else
		__clear_bit_le(b, (void *) w_le);

	if (val & 1)
		__set_bit_le(b + 1, (void *) w_le);
	else
		__clear_bit_le(b + 1, (void *) w_le);
}

static int sm_find_free(void *addr, unsigned begin, unsigned end,
			unsigned *result)
{
	while (begin < end) {
		if (!(begin & (ENTRIES_PER_WORD - 1)) &&
		    bitmap_word_used(addr, begin)) {
			begin += ENTRIES_PER_WORD;
			continue;
		}

		if (!sm_lookup_bitmap(addr, begin)) {
			*result = begin;
			return 0;
		}

		begin++;
	}

	return -ENOSPC;
}

/*----------------------------------------------------------------*/

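/*
 * Common core shared by the metadata and disk space maps.  The
 * flavour-specific operations (load_ie, save_ie, init_index, open_index,
 * max_entries and commit) are filled in by sm_ll_new_metadata(),
 * sm_ll_open_metadata(), sm_ll_new_disk() and sm_ll_open_disk() below.
 */
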
static int sm_ll_init(struct ll_disk *ll, struct dm_transaction_manager *tm)
{
	ll->tm = tm;

	ll->bitmap_info.tm = tm;
	ll->bitmap_info.levels = 1;

	/*
	 * Because the new bitmap blocks are created via a shadow
	 * operation, the old entry has already had its reference count
	 * decremented and we don't need the btree to do any bookkeeping.
	 */
	ll->bitmap_info.value_type.size = sizeof(struct disk_index_entry);
	ll->bitmap_info.value_type.inc = NULL;
	ll->bitmap_info.value_type.dec = NULL;
	ll->bitmap_info.value_type.equal = NULL;

	ll->ref_count_info.tm = tm;
	ll->ref_count_info.levels = 1;
	ll->ref_count_info.value_type.size = sizeof(uint32_t);
	ll->ref_count_info.value_type.inc = NULL;
	ll->ref_count_info.value_type.dec = NULL;
	ll->ref_count_info.value_type.equal = NULL;

	ll->block_size = dm_bm_block_size(dm_tm_get_bm(tm));

	if (ll->block_size > (1 << 30)) {
		DMERR("block size too big to hold bitmaps");
		return -EINVAL;
	}

	ll->entries_per_block = (ll->block_size - sizeof(struct disk_bitmap_header)) *
		ENTRIES_PER_BYTE;
	ll->nr_blocks = 0;
	ll->bitmap_root = 0;
	ll->ref_count_root = 0;
	ll->bitmap_index_changed = false;

	return 0;
}

int sm_ll_extend(struct ll_disk *ll, dm_block_t extra_blocks)
{
	int r;
	dm_block_t i, nr_blocks, nr_indexes;
	unsigned old_blocks, blocks;

	nr_blocks = ll->nr_blocks + extra_blocks;
	old_blocks = dm_sector_div_up(ll->nr_blocks, ll->entries_per_block);
	blocks = dm_sector_div_up(nr_blocks, ll->entries_per_block);

	nr_indexes = dm_sector_div_up(nr_blocks, ll->entries_per_block);
	if (nr_indexes > ll->max_entries(ll)) {
		DMERR("space map too large");
		return -EINVAL;
	}

	for (i = old_blocks; i < blocks; i++) {
		struct dm_block *b;
		struct disk_index_entry idx;

		r = dm_tm_new_block(ll->tm, &dm_sm_bitmap_validator, &b);
		if (r < 0)
			return r;
		idx.blocknr = cpu_to_le64(dm_block_location(b));

		r = dm_tm_unlock(ll->tm, b);
		if (r < 0)
			return r;

		idx.nr_free = cpu_to_le32(ll->entries_per_block);
		idx.none_free_before = 0;

		r = ll->save_ie(ll, i, &idx);
		if (r < 0)
			return r;
	}

	ll->nr_blocks = nr_blocks;
	return 0;
}

int sm_ll_lookup_bitmap(struct ll_disk *ll, dm_block_t b, uint32_t *result)
{
	int r;
	dm_block_t index = b;
	struct disk_index_entry ie_disk;
	struct dm_block *blk;

	b = do_div(index, ll->entries_per_block);
	r = ll->load_ie(ll, index, &ie_disk);
	if (r < 0)
		return r;

	r = dm_tm_read_lock(ll->tm, le64_to_cpu(ie_disk.blocknr),
			    &dm_sm_bitmap_validator, &blk);
	if (r < 0)
		return r;

	*result = sm_lookup_bitmap(dm_bitmap_data(blk), b);

	return dm_tm_unlock(ll->tm, blk);
}

int sm_ll_lookup(struct ll_disk *ll, dm_block_t b, uint32_t *result)
{
	__le32 le_rc;
	int r = sm_ll_lookup_bitmap(ll, b, result);

	if (r)
		return r;

	if (*result != 3)
		return r;

	r = dm_btree_lookup(&ll->ref_count_info, ll->ref_count_root, &b, &le_rc);
	if (r < 0)
		return r;

	*result = le32_to_cpu(le_rc);

	return r;
}

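/*
 * Finds the first free block in the half-open range [begin, end).  The
 * index entries provide two hints that cut down the search: nr_free lets
 * bitmap blocks with no free entries be skipped entirely, and
 * none_free_before gives the first position within a bitmap worth
 * scanning.
 */
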
int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
			  dm_block_t end, dm_block_t *result)
{
	int r;
	struct disk_index_entry ie_disk;
	dm_block_t i, index_begin = begin;
	dm_block_t index_end = dm_sector_div_up(end, ll->entries_per_block);

	/*
	 * FIXME: Use shifts
	 */
	begin = do_div(index_begin, ll->entries_per_block);
	end = do_div(end, ll->entries_per_block);

	for (i = index_begin; i < index_end; i++, begin = 0) {
		struct dm_block *blk;
		unsigned position;
		uint32_t bit_end;

		r = ll->load_ie(ll, i, &ie_disk);
		if (r < 0)
			return r;

		if (le32_to_cpu(ie_disk.nr_free) == 0)
			continue;

		r = dm_tm_read_lock(ll->tm, le64_to_cpu(ie_disk.blocknr),
				    &dm_sm_bitmap_validator, &blk);
		if (r < 0)
			return r;

		bit_end = (i == index_end - 1) ? end : ll->entries_per_block;

		r = sm_find_free(dm_bitmap_data(blk),
				 max_t(unsigned, begin, le32_to_cpu(ie_disk.none_free_before)),
				 bit_end, &position);
		if (r == -ENOSPC) {
			/*
			 * This might happen because we started searching
			 * part way through the bitmap.
			 */
			dm_tm_unlock(ll->tm, blk);
			continue;

		} else if (r < 0) {
			dm_tm_unlock(ll->tm, blk);
			return r;
		}

		r = dm_tm_unlock(ll->tm, blk);
		if (r < 0)
			return r;

		*result = i * ll->entries_per_block + (dm_block_t) position;
		return 0;
	}

	return -ENOSPC;
}

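/*
 * Sets the reference count of block b.  The bitmap block is shadowed so
 * the change is transactional.  Counts of 0-2 are written directly into
 * the two-bit entry; larger counts store 3 in the bitmap and keep the
 * real value in the ref_count btree.  The index entry's nr_free and
 * none_free_before hints are updated, and *ev reports whether the block
 * was newly allocated or freed.
 */
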
int sm_ll_insert(struct ll_disk *ll, dm_block_t b,
		 uint32_t ref_count, enum allocation_event *ev)
{
	int r;
	uint32_t bit, old;
	struct dm_block *nb;
	dm_block_t index = b;
	struct disk_index_entry ie_disk;
	void *bm_le;
	int inc;

	bit = do_div(index, ll->entries_per_block);
	r = ll->load_ie(ll, index, &ie_disk);
	if (r < 0)
		return r;

	r = dm_tm_shadow_block(ll->tm, le64_to_cpu(ie_disk.blocknr),
			       &dm_sm_bitmap_validator, &nb, &inc);
	if (r < 0) {
		DMERR("dm_tm_shadow_block() failed");
		return r;
	}
	ie_disk.blocknr = cpu_to_le64(dm_block_location(nb));

	bm_le = dm_bitmap_data(nb);
	old = sm_lookup_bitmap(bm_le, bit);

	if (ref_count <= 2) {
		sm_set_bitmap(bm_le, bit, ref_count);

		r = dm_tm_unlock(ll->tm, nb);
		if (r < 0)
			return r;

		if (old > 2) {
			r = dm_btree_remove(&ll->ref_count_info,
					    ll->ref_count_root,
					    &b, &ll->ref_count_root);
			if (r)
				return r;
		}

	} else {
		__le32 le_rc = cpu_to_le32(ref_count);

		sm_set_bitmap(bm_le, bit, 3);
		r = dm_tm_unlock(ll->tm, nb);
		if (r < 0)
			return r;

		__dm_bless_for_disk(&le_rc);
		r = dm_btree_insert(&ll->ref_count_info, ll->ref_count_root,
				    &b, &le_rc, &ll->ref_count_root);
		if (r < 0) {
			DMERR("ref count insert failed");
			return r;
		}
	}

	if (ref_count && !old) {
		*ev = SM_ALLOC;
		ll->nr_allocated++;
		le32_add_cpu(&ie_disk.nr_free, -1);
		if (le32_to_cpu(ie_disk.none_free_before) == bit)
			ie_disk.none_free_before = cpu_to_le32(bit + 1);

	} else if (old && !ref_count) {
		*ev = SM_FREE;
		ll->nr_allocated--;
		le32_add_cpu(&ie_disk.nr_free, 1);
		ie_disk.none_free_before = cpu_to_le32(min(le32_to_cpu(ie_disk.none_free_before), bit));
	}

	return ll->save_ie(ll, index, &ie_disk);
}

int sm_ll_inc(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev)
{
	int r;
	uint32_t rc;

	r = sm_ll_lookup(ll, b, &rc);
	if (r)
		return r;

	return sm_ll_insert(ll, b, rc + 1, ev);
}

int sm_ll_dec(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev)
{
	int r;
	uint32_t rc;

	r = sm_ll_lookup(ll, b, &rc);
	if (r)
		return r;

	if (!rc)
		return -EINVAL;

	return sm_ll_insert(ll, b, rc - 1, ev);
}

int sm_ll_commit(struct ll_disk *ll)
{
	int r = 0;

	if (ll->bitmap_index_changed) {
		r = ll->commit(ll);
		if (!r)
			ll->bitmap_index_changed = false;
	}

	return r;
}

/*----------------------------------------------------------------*/

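/*
 * Metadata space map flavour: the whole index (ll->mi_le) is kept in
 * core and rewritten to a single index block on commit, so it is limited
 * to MAX_METADATA_BITMAPS index entries.
 */
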
static int metadata_ll_load_ie(struct ll_disk *ll, dm_block_t index,
			       struct disk_index_entry *ie)
{
	memcpy(ie, ll->mi_le.index + index, sizeof(*ie));
	return 0;
}

static int metadata_ll_save_ie(struct ll_disk *ll, dm_block_t index,
			       struct disk_index_entry *ie)
{
	ll->bitmap_index_changed = true;
	memcpy(ll->mi_le.index + index, ie, sizeof(*ie));
	return 0;
}

static int metadata_ll_init_index(struct ll_disk *ll)
{
	int r;
	struct dm_block *b;

	r = dm_tm_new_block(ll->tm, &index_validator, &b);
	if (r < 0)
		return r;

	memcpy(dm_block_data(b), &ll->mi_le, sizeof(ll->mi_le));
	ll->bitmap_root = dm_block_location(b);

	return dm_tm_unlock(ll->tm, b);
}

static int metadata_ll_open(struct ll_disk *ll)
{
	int r;
	struct dm_block *block;

	r = dm_tm_read_lock(ll->tm, ll->bitmap_root,
			    &index_validator, &block);
	if (r)
		return r;

	memcpy(&ll->mi_le, dm_block_data(block), sizeof(ll->mi_le));
	return dm_tm_unlock(ll->tm, block);
}

static dm_block_t metadata_ll_max_entries(struct ll_disk *ll)
{
	return MAX_METADATA_BITMAPS;
}

static int metadata_ll_commit(struct ll_disk *ll)
{
	int r, inc;
	struct dm_block *b;

	r = dm_tm_shadow_block(ll->tm, ll->bitmap_root, &index_validator, &b, &inc);
	if (r)
		return r;

	memcpy(dm_block_data(b), &ll->mi_le, sizeof(ll->mi_le));
	ll->bitmap_root = dm_block_location(b);

	return dm_tm_unlock(ll->tm, b);
}

int sm_ll_new_metadata(struct ll_disk *ll, struct dm_transaction_manager *tm)
{
	int r;

	r = sm_ll_init(ll, tm);
	if (r < 0)
		return r;

	ll->load_ie = metadata_ll_load_ie;
	ll->save_ie = metadata_ll_save_ie;
	ll->init_index = metadata_ll_init_index;
	ll->open_index = metadata_ll_open;
	ll->max_entries = metadata_ll_max_entries;
	ll->commit = metadata_ll_commit;

	ll->nr_blocks = 0;
	ll->nr_allocated = 0;

	r = ll->init_index(ll);
	if (r < 0)
		return r;

	r = dm_btree_empty(&ll->ref_count_info, &ll->ref_count_root);
	if (r < 0)
		return r;

	return 0;
}

int sm_ll_open_metadata(struct ll_disk *ll, struct dm_transaction_manager *tm,
			void *root_le, size_t len)
{
	int r;
	struct disk_sm_root *smr = root_le;

	if (len < sizeof(struct disk_sm_root)) {
		DMERR("sm_metadata root too small");
		return -ENOMEM;
	}

	r = sm_ll_init(ll, tm);
	if (r < 0)
		return r;

	ll->load_ie = metadata_ll_load_ie;
	ll->save_ie = metadata_ll_save_ie;
	ll->init_index = metadata_ll_init_index;
	ll->open_index = metadata_ll_open;
	ll->max_entries = metadata_ll_max_entries;
	ll->commit = metadata_ll_commit;

	ll->nr_blocks = le64_to_cpu(smr->nr_blocks);
	ll->nr_allocated = le64_to_cpu(smr->nr_allocated);
	ll->bitmap_root = le64_to_cpu(smr->bitmap_root);
	ll->ref_count_root = le64_to_cpu(smr->ref_count_root);

	return ll->open_index(ll);
}

/*----------------------------------------------------------------*/

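/*
 * Disk space map flavour: index entries live in a btree rooted at
 * bitmap_root, so there is no fixed limit on the number of bitmaps and
 * nothing extra needs writing at commit time.
 */
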
static int disk_ll_load_ie(struct ll_disk *ll, dm_block_t index,
			   struct disk_index_entry *ie)
{
	return dm_btree_lookup(&ll->bitmap_info, ll->bitmap_root, &index, ie);
}

static int disk_ll_save_ie(struct ll_disk *ll, dm_block_t index,
			   struct disk_index_entry *ie)
{
	__dm_bless_for_disk(ie);
	return dm_btree_insert(&ll->bitmap_info, ll->bitmap_root,
			       &index, ie, &ll->bitmap_root);
}

static int disk_ll_init_index(struct ll_disk *ll)
{
	return dm_btree_empty(&ll->bitmap_info, &ll->bitmap_root);
}

static int disk_ll_open(struct ll_disk *ll)
{
	/* nothing to do */
	return 0;
}

static dm_block_t disk_ll_max_entries(struct ll_disk *ll)
{
	return -1ULL;	/* effectively unbounded */
}

static int disk_ll_commit(struct ll_disk *ll)
{
	return 0;
}

int sm_ll_new_disk(struct ll_disk *ll, struct dm_transaction_manager *tm)
{
	int r;

	r = sm_ll_init(ll, tm);
	if (r < 0)
		return r;

	ll->load_ie = disk_ll_load_ie;
	ll->save_ie = disk_ll_save_ie;
	ll->init_index = disk_ll_init_index;
	ll->open_index = disk_ll_open;
	ll->max_entries = disk_ll_max_entries;
	ll->commit = disk_ll_commit;

	ll->nr_blocks = 0;
	ll->nr_allocated = 0;

	r = ll->init_index(ll);
	if (r < 0)
		return r;

	r = dm_btree_empty(&ll->ref_count_info, &ll->ref_count_root);
	if (r < 0)
		return r;

	return 0;
}

int sm_ll_open_disk(struct ll_disk *ll, struct dm_transaction_manager *tm,
		    void *root_le, size_t len)
{
	int r;
	struct disk_sm_root *smr = root_le;

	if (len < sizeof(struct disk_sm_root)) {
		DMERR("sm_metadata root too small");
		return -ENOMEM;
	}

	r = sm_ll_init(ll, tm);
	if (r < 0)
		return r;

	ll->load_ie = disk_ll_load_ie;
	ll->save_ie = disk_ll_save_ie;
	ll->init_index = disk_ll_init_index;
	ll->open_index = disk_ll_open;
	ll->max_entries = disk_ll_max_entries;
	ll->commit = disk_ll_commit;

	ll->nr_blocks = le64_to_cpu(smr->nr_blocks);
	ll->nr_allocated = le64_to_cpu(smr->nr_allocated);
	ll->bitmap_root = le64_to_cpu(smr->bitmap_root);
	ll->ref_count_root = le64_to_cpu(smr->ref_count_root);

	return ll->open_index(ll);
}

/*----------------------------------------------------------------*/