/*
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 * Copyright (C) 2006-2008 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm-exception-store.h"

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/dm-io.h>

#define DM_MSG_PREFIX "persistent snapshot"
#define DM_CHUNK_SIZE_DEFAULT_SECTORS 32        /* 16KB */

/*-----------------------------------------------------------------
 * Persistent snapshots, by persistent we mean that the snapshot
 * will survive a reboot.
 *---------------------------------------------------------------*/

/*
 * We need to store a record of which parts of the origin have
 * been copied to the snapshot device.  The snapshot code
 * requires that we copy exception chunks to chunk-aligned areas
 * of the COW store.  It makes sense, therefore, to store the
 * metadata in chunk-size blocks.
 *
 * There is no backward or forward compatibility implemented:
 * snapshots with different disk versions than the kernel will
 * not be usable.  It is expected that "lvcreate" will blank out
 * the start of a fresh COW device before calling the snapshot
 * constructor.
 *
 * The first chunk of the COW device just contains the header.
 * After this there is a chunk filled with exception metadata,
 * followed by as many exception chunks as can fit in the
 * metadata areas.
 *
 * All on-disk structures are in little-endian format.  The end
 * of the exceptions info is indicated by an exception with a
 * new_chunk of 0, which is invalid since it would point to the
 * header chunk.
 */

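/*
 * Illustrative sketch of the resulting layout, assuming the default
 * 16KiB chunk and the 16-byte disk_exception defined below, which
 * gives exceptions_per_area == 1024:
 *
 *   chunk 0              header
 *   chunk 1              metadata area 0
 *   chunks 2..1025       data chunks for area 0
 *   chunk 1026           metadata area 1
 *   chunks 1027..2050    data chunks for area 1
 *   ...
 */
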
/*
 * Magic for persistent snapshots: "SnAp" - Feeble isn't it.
 */
#define SNAP_MAGIC 0x70416e53

/*
 * The on-disk version of the metadata.
 */
#define SNAPSHOT_DISK_VERSION 1

#define NUM_SNAPSHOT_HDR_CHUNKS 1

struct disk_header {
        uint32_t magic;

        /*
         * Is this snapshot valid?  There is no way of recovering
         * an invalid snapshot.
         */
        uint32_t valid;

        /*
         * Simple, incrementing version.  No backward
         * compatibility.
         */
        uint32_t version;

        /* In sectors */
        uint32_t chunk_size;
};

struct disk_exception {
        uint64_t old_chunk;
        uint64_t new_chunk;
};

struct commit_callback {
        void (*callback)(void *, int success);
        void *context;
};

/*
 * The top level structure for a persistent exception store.
 */
struct pstore {
        struct dm_exception_store *store;
        int version;
        int valid;
        uint32_t exceptions_per_area;

        /*
         * Now that we have an asynchronous kcopyd there is no
         * need for large chunk sizes, so it won't hurt to have a
         * whole chunk's worth of metadata in memory at once.
         */
        void *area;

        /*
         * An area of zeros used to clear the next area.
         */
        void *zero_area;

        /*
         * An area used for the header.  The header can be written
         * concurrently with metadata (when invalidating the snapshot),
         * so it needs a separate buffer.
         */
        void *header_area;

        /*
         * Used to keep track of which metadata area the data in
         * 'chunk' refers to.
         */
        chunk_t current_area;

        /*
         * The next free chunk for an exception.
         *
         * When creating exceptions, all the chunks here and above are
         * free.  It holds the next chunk to be allocated.  On rare
         * occasions (e.g. after a system crash) holes can be left in
         * the exception store because chunks can be committed out of
         * order.
         *
         * When merging exceptions, it does not necessarily mean all the
         * chunks here and above are free.  It holds the value it would
         * have held if all chunks had been committed in order of
         * allocation.  Consequently the value may occasionally be
         * slightly too low, but since it's only used for 'status' and
         * it can never reach its minimum value too early this doesn't
         * matter.
         */
        chunk_t next_free;

        /*
         * The index of the next free exception in the current
         * metadata area.
         */
        uint32_t current_committed;

        atomic_t pending_count;
        uint32_t callback_count;
        struct commit_callback *callbacks;
        struct dm_io_client *io_client;

        struct workqueue_struct *metadata_wq;
};

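/*
 * Convert a size in 512-byte sectors to a page count, rounding up.
 * For example, with 4KiB pages PAGE_SIZE >> 9 is 8 sectors per page,
 * so the default 32-sector chunk needs 4 pages.
 */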
static unsigned sectors_to_pages(unsigned sectors)
{
        return DIV_ROUND_UP(sectors, PAGE_SIZE >> 9);
}

static int alloc_area(struct pstore *ps)
{
        int r = -ENOMEM;
        size_t len;

        len = ps->store->chunk_size << SECTOR_SHIFT;

        /*
         * Allocate the chunk_size block of memory that will hold
         * a single metadata area.
         */
        ps->area = vmalloc(len);
        if (!ps->area)
                goto err_area;

        ps->zero_area = vmalloc(len);
        if (!ps->zero_area)
                goto err_zero_area;
        memset(ps->zero_area, 0, len);

        ps->header_area = vmalloc(len);
        if (!ps->header_area)
                goto err_header_area;

        return 0;

err_header_area:
        vfree(ps->zero_area);

err_zero_area:
        vfree(ps->area);

err_area:
        return r;
}

static void free_area(struct pstore *ps)
{
        if (ps->area)
                vfree(ps->area);
        ps->area = NULL;

        if (ps->zero_area)
                vfree(ps->zero_area);
        ps->zero_area = NULL;

        if (ps->header_area)
                vfree(ps->header_area);
        ps->header_area = NULL;
}

struct mdata_req {
        struct dm_io_region *where;
        struct dm_io_request *io_req;
        struct work_struct work;
        int result;
};

static void do_metadata(struct work_struct *work)
{
        struct mdata_req *req = container_of(work, struct mdata_req, work);

        req->result = dm_io(req->io_req, 1, req->where, NULL);
}

/*
 * Read or write a chunk-aligned and chunk-sized block of data from a device.
 */
static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
                    int metadata)
{
        struct dm_io_region where = {
                .bdev = dm_snap_cow(ps->store->snap)->bdev,
                .sector = ps->store->chunk_size * chunk,
                .count = ps->store->chunk_size,
        };
        struct dm_io_request io_req = {
                .bi_rw = rw,
                .mem.type = DM_IO_VMA,
                .mem.ptr.vma = area,
                .client = ps->io_client,
                .notify.fn = NULL,
        };
        struct mdata_req req;

        if (!metadata)
                return dm_io(&io_req, 1, &where, NULL);

        req.where = &where;
        req.io_req = &io_req;

        /*
         * Issue the synchronous I/O from a different thread
         * to avoid generic_make_request recursion.
         */
        INIT_WORK_ON_STACK(&req.work, do_metadata);
        queue_work(ps->metadata_wq, &req.work);
        flush_workqueue(ps->metadata_wq);

        return req.result;
}

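/*
 * Each metadata area occupies one chunk and is followed by
 * exceptions_per_area data chunks, with chunk 0 reserved for the
 * header.  For example, with exceptions_per_area == 1024, area 0
 * lives at chunk 1 and area 1 at chunk 1026.
 */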
/*
 * Convert a metadata area index to a chunk index.
 */
static chunk_t area_location(struct pstore *ps, chunk_t area)
{
        return 1 + ((ps->exceptions_per_area + 1) * area);
}

/*
 * Read or write a metadata area, remembering to skip the first
 * chunk, which holds the header.
 */
static int area_io(struct pstore *ps, int rw)
{
        int r;
        chunk_t chunk;

        chunk = area_location(ps, ps->current_area);

        r = chunk_io(ps, ps->area, chunk, rw, 0);
        if (r)
                return r;

        return 0;
}

static void zero_memory_area(struct pstore *ps)
{
        memset(ps->area, 0, ps->store->chunk_size << SECTOR_SHIFT);
}

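/*
 * Wipe the on-disk copy of a metadata area.  A cleared area is what
 * terminates the scan on the next load: an exception whose new_chunk
 * is 0 marks the end of the list (see insert_exceptions() below).
 */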
static int zero_disk_area(struct pstore *ps, chunk_t area)
{
        return chunk_io(ps, ps->zero_area, area_location(ps, area), WRITE, 0);
}

static int read_header(struct pstore *ps, int *new_snapshot)
{
        int r;
        struct disk_header *dh;
        unsigned chunk_size;
        int chunk_size_supplied = 1;
        char *chunk_err;

        /*
         * Use the default chunk size (or logical_block_size, if larger)
         * if none was supplied.
         */
        if (!ps->store->chunk_size) {
                ps->store->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS,
                    bdev_logical_block_size(dm_snap_cow(ps->store->snap)->
                                            bdev) >> 9);
                ps->store->chunk_mask = ps->store->chunk_size - 1;
                ps->store->chunk_shift = ffs(ps->store->chunk_size) - 1;
                chunk_size_supplied = 0;
        }

        ps->io_client = dm_io_client_create(sectors_to_pages(ps->store->
                                                             chunk_size));
        if (IS_ERR(ps->io_client))
                return PTR_ERR(ps->io_client);

        r = alloc_area(ps);
        if (r)
                return r;

        r = chunk_io(ps, ps->header_area, 0, READ, 1);
        if (r)
                goto bad;

        dh = ps->header_area;

        if (le32_to_cpu(dh->magic) == 0) {
                *new_snapshot = 1;
                return 0;
        }

        if (le32_to_cpu(dh->magic) != SNAP_MAGIC) {
                DMWARN("Invalid or corrupt snapshot");
                r = -ENXIO;
                goto bad;
        }

        *new_snapshot = 0;
        ps->valid = le32_to_cpu(dh->valid);
        ps->version = le32_to_cpu(dh->version);
        chunk_size = le32_to_cpu(dh->chunk_size);

        if (ps->store->chunk_size == chunk_size)
                return 0;

        if (chunk_size_supplied)
                DMWARN("chunk size %u in device metadata overrides "
                       "table chunk size of %u.",
                       chunk_size, ps->store->chunk_size);

        /* We had a bogus chunk_size.  Fix stuff up. */
        free_area(ps);

        r = dm_exception_store_set_chunk_size(ps->store, chunk_size,
                                              &chunk_err);
        if (r) {
                DMERR("invalid on-disk chunk size %u: %s.",
                      chunk_size, chunk_err);
                return r;
        }

        r = dm_io_client_resize(sectors_to_pages(ps->store->chunk_size),
                                ps->io_client);
        if (r)
                return r;

        r = alloc_area(ps);
        return r;

bad:
        free_area(ps);
        return r;
}

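/*
 * As a concrete illustration (assuming a 32-sector chunk), the first
 * 16 bytes of a freshly written header would read, little-endian:
 *
 *   53 6e 41 70  01 00 00 00  01 00 00 00  20 00 00 00
 *   magic "SnAp" valid = 1    version = 1  chunk_size = 32
 */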
static int write_header(struct pstore *ps)
{
        struct disk_header *dh;

        memset(ps->header_area, 0, ps->store->chunk_size << SECTOR_SHIFT);

        dh = ps->header_area;
        dh->magic = cpu_to_le32(SNAP_MAGIC);
        dh->valid = cpu_to_le32(ps->valid);
        dh->version = cpu_to_le32(ps->version);
        dh->chunk_size = cpu_to_le32(ps->store->chunk_size);

        return chunk_io(ps, ps->header_area, 0, WRITE, 1);
}

/*
 * Access functions for the disk exceptions.  These do the endian conversions.
 */
static struct disk_exception *get_exception(struct pstore *ps, uint32_t index)
{
        BUG_ON(index >= ps->exceptions_per_area);

        return ((struct disk_exception *) ps->area) + index;
}

static void read_exception(struct pstore *ps,
                           uint32_t index, struct disk_exception *result)
{
        struct disk_exception *e = get_exception(ps, index);

        /* copy it */
        result->old_chunk = le64_to_cpu(e->old_chunk);
        result->new_chunk = le64_to_cpu(e->new_chunk);
}

static void write_exception(struct pstore *ps,
                            uint32_t index, struct disk_exception *de)
{
        struct disk_exception *e = get_exception(ps, index);

        /* copy it */
        e->old_chunk = cpu_to_le64(de->old_chunk);
        e->new_chunk = cpu_to_le64(de->new_chunk);
}

static void clear_exception(struct pstore *ps, uint32_t index)
{
        struct disk_exception *e = get_exception(ps, index);

        /* clear it */
        e->old_chunk = 0;
        e->new_chunk = 0;
}

/*
 * Registers the exceptions that are present in the current area.
 * 'full' is filled in to indicate whether the area has been
 * filled.
 */
static int insert_exceptions(struct pstore *ps,
                             int (*callback)(void *callback_context,
                                             chunk_t old, chunk_t new),
                             void *callback_context,
                             int *full)
{
        int r;
        unsigned int i;
        struct disk_exception de;

        /* presume the area is full */
        *full = 1;

        for (i = 0; i < ps->exceptions_per_area; i++) {
                read_exception(ps, i, &de);

                /*
                 * If the new_chunk is pointing at the start of
                 * the COW device, where the first metadata area
                 * is, we know that we've hit the end of the
                 * exceptions.  Therefore the area is not full.
                 */
                if (de.new_chunk == 0LL) {
                        ps->current_committed = i;
                        *full = 0;
                        break;
                }

                /*
                 * Keep track of the start of the free chunks.
                 */
                if (ps->next_free <= de.new_chunk)
                        ps->next_free = de.new_chunk + 1;

                /*
                 * Otherwise we add the exception to the snapshot.
                 */
                r = callback(callback_context, de.old_chunk, de.new_chunk);
                if (r)
                        return r;
        }

        return 0;
}

static int read_exceptions(struct pstore *ps,
                           int (*callback)(void *callback_context, chunk_t old,
                                           chunk_t new),
                           void *callback_context)
{
        int r, full = 1;

        /*
         * Keep reading chunks and inserting exceptions until
         * we find a partially full area.
         */
        for (ps->current_area = 0; full; ps->current_area++) {
                r = area_io(ps, READ);
                if (r)
                        return r;

                r = insert_exceptions(ps, callback, callback_context, &full);
                if (r)
                        return r;
        }

        ps->current_area--;

        return 0;
}

static struct pstore *get_info(struct dm_exception_store *store)
{
        return (struct pstore *) store->context;
}

static void persistent_usage(struct dm_exception_store *store,
                             sector_t *total_sectors,
                             sector_t *sectors_allocated,
                             sector_t *metadata_sectors)
{
        struct pstore *ps = get_info(store);

        *sectors_allocated = ps->next_free * store->chunk_size;
        *total_sectors = get_dev_size(dm_snap_cow(store->snap)->bdev);

        /*
         * First chunk is the fixed header.
         * Then there are (ps->current_area + 1) metadata chunks, each one
         * separated from the next by ps->exceptions_per_area data chunks.
         */
        *metadata_sectors = (ps->current_area + 1 + NUM_SNAPSHOT_HDR_CHUNKS) *
                            store->chunk_size;
}

static void persistent_dtr(struct dm_exception_store *store)
{
        struct pstore *ps = get_info(store);

        destroy_workqueue(ps->metadata_wq);

        /* Created in read_header */
        if (ps->io_client)
                dm_io_client_destroy(ps->io_client);
        free_area(ps);

        /* Allocated in persistent_read_metadata */
        if (ps->callbacks)
                vfree(ps->callbacks);

        kfree(ps);
}

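/*
 * Note on the callback array sized below: commit callbacks are
 * flushed whenever all pending exceptions have been committed or a
 * metadata area fills up, so at most exceptions_per_area of them
 * can ever be queued at once.
 */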
static int persistent_read_metadata(struct dm_exception_store *store,
                                    int (*callback)(void *callback_context,
                                                    chunk_t old, chunk_t new),
                                    void *callback_context)
{
        int r, uninitialized_var(new_snapshot);
        struct pstore *ps = get_info(store);

        /*
         * Read the snapshot header.
         */
        r = read_header(ps, &new_snapshot);
        if (r)
                return r;

        /*
         * Now that we know the correct chunk_size, complete the
         * initialisation.
         */
        ps->exceptions_per_area = (ps->store->chunk_size << SECTOR_SHIFT) /
                                  sizeof(struct disk_exception);
        ps->callbacks = dm_vcalloc(ps->exceptions_per_area,
                                   sizeof(*ps->callbacks));
        if (!ps->callbacks)
                return -ENOMEM;

        /*
         * Do we need to set up a new snapshot?
         */
        if (new_snapshot) {
                r = write_header(ps);
                if (r) {
                        DMWARN("write_header failed");
                        return r;
                }

                ps->current_area = 0;
                zero_memory_area(ps);
                r = zero_disk_area(ps, 0);
                if (r)
                        DMWARN("zero_disk_area(0) failed");
                return r;
        }

        /*
         * Sanity checks.
         */
        if (ps->version != SNAPSHOT_DISK_VERSION) {
                DMWARN("unable to handle snapshot disk version %d",
                       ps->version);
                return -EINVAL;
        }

        /*
         * Metadata are valid, but the snapshot is invalidated.
         */
        if (!ps->valid)
                return 1;

        /*
         * Read the metadata.
         */
        r = read_exceptions(ps, callback, callback_context);

        return r;
}

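/*
 * Free-chunk allocation below must skip the chunks holding metadata.
 * The layout repeats with period stride = exceptions_per_area + 1,
 * and chunks whose index modulo the stride equals 1 are metadata
 * (see area_location()).  E.g. with exceptions_per_area == 1024,
 * chunks 1, 1026, 2051, ... are never handed out as exception
 * destinations.
 */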
static int persistent_prepare_exception(struct dm_exception_store *store,
                                        struct dm_exception *e)
{
        struct pstore *ps = get_info(store);
        uint32_t stride;
        chunk_t next_free;
        sector_t size = get_dev_size(dm_snap_cow(store->snap)->bdev);

        /* Is there enough room? */
        if (size < ((ps->next_free + 1) * store->chunk_size))
                return -ENOSPC;

        e->new_chunk = ps->next_free;

        /*
         * Move on to the next free chunk, making sure to take
         * into account the location of the metadata chunks.
         */
        stride = (ps->exceptions_per_area + 1);
        next_free = ++ps->next_free;
        if (sector_div(next_free, stride) == 1)
                ps->next_free++;

        atomic_inc(&ps->pending_count);
        return 0;
}

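/*
 * Commits are batched: the callbacks registered below accumulate
 * until either every pending exception has been committed or the
 * current metadata area fills up; only then is the area written out
 * and each queued callback completed with the store's validity.
 */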
static void persistent_commit_exception(struct dm_exception_store *store,
                                        struct dm_exception *e,
                                        void (*callback) (void *, int success),
                                        void *callback_context)
{
        unsigned int i;
        struct pstore *ps = get_info(store);
        struct disk_exception de;
        struct commit_callback *cb;

        de.old_chunk = e->old_chunk;
        de.new_chunk = e->new_chunk;
        write_exception(ps, ps->current_committed++, &de);

        /*
         * Add the callback to the back of the array.  This code
         * is the only place where the callback array is
         * manipulated, and we know that it will never be called
         * multiple times concurrently.
         */
        cb = ps->callbacks + ps->callback_count++;
        cb->callback = callback;
        cb->context = callback_context;

        /*
         * If there are exceptions in flight and we have not yet
         * filled this metadata area there's nothing more to do.
         */
        if (!atomic_dec_and_test(&ps->pending_count) &&
            (ps->current_committed != ps->exceptions_per_area))
                return;

        /*
         * If we completely filled the current area, then wipe the next one.
         */
        if ((ps->current_committed == ps->exceptions_per_area) &&
            zero_disk_area(ps, ps->current_area + 1))
                ps->valid = 0;

        /*
         * Commit exceptions to disk.
         */
        if (ps->valid && area_io(ps, WRITE_BARRIER))
                ps->valid = 0;

        /*
         * Advance to the next area if this one is full.
         */
        if (ps->current_committed == ps->exceptions_per_area) {
                ps->current_committed = 0;
                ps->current_area++;
                zero_memory_area(ps);
        }

        for (i = 0; i < ps->callback_count; i++) {
                cb = ps->callbacks + i;
                cb->callback(cb->context, ps->valid);
        }

        ps->callback_count = 0;
}

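/*
 * Merging works backwards through the committed exceptions.
 * prepare_merge reports how many of the most recent exceptions form
 * a consecutive run in both old_chunk and new_chunk, so the caller
 * can treat the run as a single linear region to copy back.
 */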
static int persistent_prepare_merge(struct dm_exception_store *store,
                                    chunk_t *last_old_chunk,
                                    chunk_t *last_new_chunk)
{
        struct pstore *ps = get_info(store);
        struct disk_exception de;
        int nr_consecutive;
        int r;

        /*
         * When the current area is empty, move back to the preceding area.
         */
        if (!ps->current_committed) {
                /*
                 * Have we finished?
                 */
                if (!ps->current_area)
                        return 0;

                ps->current_area--;
                r = area_io(ps, READ);
                if (r < 0)
                        return r;
                ps->current_committed = ps->exceptions_per_area;
        }

        read_exception(ps, ps->current_committed - 1, &de);
        *last_old_chunk = de.old_chunk;
        *last_new_chunk = de.new_chunk;

        /*
         * Find the number of consecutive chunks within the current area,
         * working backwards.
         */
        for (nr_consecutive = 1; nr_consecutive < ps->current_committed;
             nr_consecutive++) {
                read_exception(ps, ps->current_committed - 1 - nr_consecutive,
                               &de);
                if (de.old_chunk != *last_old_chunk - nr_consecutive ||
                    de.new_chunk != *last_new_chunk - nr_consecutive)
                        break;
        }

        return nr_consecutive;
}

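/*
 * After a merge commit, next_free is recomputed as if every
 * remaining exception had been committed in order: e.g. with
 * current_area == 0 and k exceptions left, the committed data
 * chunks are 2..k+1, so next_free becomes k+2.
 */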
static int persistent_commit_merge(struct dm_exception_store *store,
                                   int nr_merged)
{
        int r, i;
        struct pstore *ps = get_info(store);

        BUG_ON(nr_merged > ps->current_committed);

        for (i = 0; i < nr_merged; i++)
                clear_exception(ps, ps->current_committed - 1 - i);

        r = area_io(ps, WRITE);
        if (r < 0)
                return r;

        ps->current_committed -= nr_merged;

        /*
         * At this stage, only persistent_usage() uses ps->next_free, so
         * we make no attempt to keep ps->next_free strictly accurate
         * as exceptions may have been committed out-of-order originally.
         * Once a snapshot has become merging, we set it to the value it
         * would have held had all the exceptions been committed in order.
         *
         * ps->current_area does not get reduced by prepare_merge() until
         * after commit_merge() has removed the nr_merged previous exceptions.
         */
        ps->next_free = (area_location(ps, ps->current_area) - 1) +
                        (ps->current_committed + 1) + NUM_SNAPSHOT_HDR_CHUNKS;

        return 0;
}

static void persistent_drop_snapshot(struct dm_exception_store *store)
{
        struct pstore *ps = get_info(store);

        ps->valid = 0;
        if (write_header(ps))
                DMWARN("write header failed");
}

static int persistent_ctr(struct dm_exception_store *store,
                          unsigned argc, char **argv)
{
        struct pstore *ps;

        /* allocate the pstore */
        ps = kzalloc(sizeof(*ps), GFP_KERNEL);
        if (!ps)
                return -ENOMEM;

        ps->store = store;
        ps->valid = 1;
        ps->version = SNAPSHOT_DISK_VERSION;
        ps->area = NULL;
        ps->zero_area = NULL;
        ps->header_area = NULL;
        ps->next_free = NUM_SNAPSHOT_HDR_CHUNKS + 1; /* header and 1st area */
        ps->current_committed = 0;

        ps->callback_count = 0;
        atomic_set(&ps->pending_count, 0);
        ps->callbacks = NULL;

        ps->metadata_wq = create_singlethread_workqueue("ksnaphd");
        if (!ps->metadata_wq) {
                kfree(ps);
                DMERR("couldn't start header metadata update thread");
                return -ENOMEM;
        }

        store->context = ps;

        return 0;
}

static unsigned persistent_status(struct dm_exception_store *store,
                                  status_type_t status, char *result,
                                  unsigned maxlen)
{
        unsigned sz = 0;

        switch (status) {
        case STATUSTYPE_INFO:
                break;
        case STATUSTYPE_TABLE:
                DMEMIT(" P %llu", (unsigned long long)store->chunk_size);
        }

        return sz;
}

static struct dm_exception_store_type _persistent_type = {
        .name = "persistent",
        .module = THIS_MODULE,
        .ctr = persistent_ctr,
        .dtr = persistent_dtr,
        .read_metadata = persistent_read_metadata,
        .prepare_exception = persistent_prepare_exception,
        .commit_exception = persistent_commit_exception,
        .prepare_merge = persistent_prepare_merge,
        .commit_merge = persistent_commit_merge,
        .drop_snapshot = persistent_drop_snapshot,
        .usage = persistent_usage,
        .status = persistent_status,
};

static struct dm_exception_store_type _persistent_compat_type = {
        .name = "P",
        .module = THIS_MODULE,
        .ctr = persistent_ctr,
        .dtr = persistent_dtr,
        .read_metadata = persistent_read_metadata,
        .prepare_exception = persistent_prepare_exception,
        .commit_exception = persistent_commit_exception,
        .prepare_merge = persistent_prepare_merge,
        .commit_merge = persistent_commit_merge,
        .drop_snapshot = persistent_drop_snapshot,
        .usage = persistent_usage,
        .status = persistent_status,
};

int dm_persistent_snapshot_init(void)
{
        int r;

        r = dm_exception_store_type_register(&_persistent_type);
        if (r) {
                DMERR("Unable to register persistent exception store type");
                return r;
        }

        r = dm_exception_store_type_register(&_persistent_compat_type);
        if (r) {
                DMERR("Unable to register old-style persistent exception "
                      "store type");
                dm_exception_store_type_unregister(&_persistent_type);
                return r;
        }

        return r;
}

void dm_persistent_snapshot_exit(void)
{
        dm_exception_store_type_unregister(&_persistent_type);
        dm_exception_store_type_unregister(&_persistent_compat_type);
}