/* drivers/md/dm-exception-store.h (from linux/fpc-iii.git) */
1 /*
2 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
3 * Copyright (C) 2008 Red Hat, Inc. All rights reserved.
5 * Device-mapper snapshot exception store.
7 * This file is released under the GPL.
8 */
10 #ifndef _LINUX_DM_EXCEPTION_STORE
11 #define _LINUX_DM_EXCEPTION_STORE
13 #include <linux/blkdev.h>
14 #include <linux/device-mapper.h>
17 * The snapshot code deals with largish chunks of the disk at a
18 * time. Typically 32k - 512k.
20 typedef sector_t chunk_t;
23 * An exception is used where an old chunk of data has been
24 * replaced by a new one.
25 * If chunk_t is 64 bits in size, the top 8 bits of new_chunk hold the number
26 * of chunks that follow contiguously. Remaining bits hold the number of the
27 * chunk within the device.
29 struct dm_exception {
30 struct list_head hash_list;
32 chunk_t old_chunk;
33 chunk_t new_chunk;
37 * Abstraction to handle the meta/layout of exception stores (the
38 * COW device).
40 struct dm_exception_store;
41 struct dm_exception_store_type {
42 const char *name;
43 struct module *module;
45 int (*ctr) (struct dm_exception_store *store,
46 unsigned argc, char **argv);
49 * Destroys this object when you've finished with it.
51 void (*dtr) (struct dm_exception_store *store);
54 * The target shouldn't read the COW device until this is
55 * called. As exceptions are read from the COW, they are
56 * reported back via the callback.
58 int (*read_metadata) (struct dm_exception_store *store,
59 int (*callback)(void *callback_context,
60 chunk_t old, chunk_t new),
61 void *callback_context);
64 * Find somewhere to store the next exception.
66 int (*prepare_exception) (struct dm_exception_store *store,
67 struct dm_exception *e);
70 * Update the metadata with this exception.
72 void (*commit_exception) (struct dm_exception_store *store,
73 struct dm_exception *e,
74 void (*callback) (void *, int success),
75 void *callback_context);
78 * Returns 0 if the exception store is empty.
80 * If there are exceptions still to be merged, sets
81 * *last_old_chunk and *last_new_chunk to the most recent
82 * still-to-be-merged chunk and returns the number of
83 * consecutive previous ones.
85 int (*prepare_merge) (struct dm_exception_store *store,
86 chunk_t *last_old_chunk, chunk_t *last_new_chunk);
89 * Clear the last n exceptions.
90 * nr_merged must be <= the value returned by prepare_merge.
92 int (*commit_merge) (struct dm_exception_store *store, int nr_merged);
95 * The snapshot is invalid, note this in the metadata.
97 void (*drop_snapshot) (struct dm_exception_store *store);
99 unsigned (*status) (struct dm_exception_store *store,
100 status_type_t status, char *result,
101 unsigned maxlen);
104 * Return how full the snapshot is.
106 void (*usage) (struct dm_exception_store *store,
107 sector_t *total_sectors, sector_t *sectors_allocated,
108 sector_t *metadata_sectors);
110 /* For internal device-mapper use only. */
111 struct list_head list;
struct dm_snapshot;

struct dm_exception_store {
	struct dm_exception_store_type *type;
	struct dm_snapshot *snap;

	/* Size of data blocks saved - must be a power of 2 */
	unsigned chunk_size;
	unsigned chunk_mask;	/* chunk_size - 1 — NOTE(review): inferred from naming; verify in dm_exception_store_set_chunk_size */
	unsigned chunk_shift;	/* log2(chunk_size) — NOTE(review): inferred; used for sector_to_chunk shift */

	void *context;		/* private data owned by the store type */
};
/*
 * Obtain the origin or cow device used by a given snapshot.
 */
struct dm_dev *dm_snap_origin(struct dm_snapshot *snap);
struct dm_dev *dm_snap_cow(struct dm_snapshot *snap);
135 * Funtions to manipulate consecutive chunks
137 # if defined(CONFIG_LBDAF) || (BITS_PER_LONG == 64)
138 # define DM_CHUNK_CONSECUTIVE_BITS 8
139 # define DM_CHUNK_NUMBER_BITS 56
141 static inline chunk_t dm_chunk_number(chunk_t chunk)
143 return chunk & (chunk_t)((1ULL << DM_CHUNK_NUMBER_BITS) - 1ULL);
146 static inline unsigned dm_consecutive_chunk_count(struct dm_exception *e)
148 return e->new_chunk >> DM_CHUNK_NUMBER_BITS;
151 static inline void dm_consecutive_chunk_count_inc(struct dm_exception *e)
153 e->new_chunk += (1ULL << DM_CHUNK_NUMBER_BITS);
155 BUG_ON(!dm_consecutive_chunk_count(e));
158 static inline void dm_consecutive_chunk_count_dec(struct dm_exception *e)
160 BUG_ON(!dm_consecutive_chunk_count(e));
162 e->new_chunk -= (1ULL << DM_CHUNK_NUMBER_BITS);
165 # else
166 # define DM_CHUNK_CONSECUTIVE_BITS 0
168 static inline chunk_t dm_chunk_number(chunk_t chunk)
170 return chunk;
173 static inline unsigned dm_consecutive_chunk_count(struct dm_exception *e)
175 return 0;
178 static inline void dm_consecutive_chunk_count_inc(struct dm_exception *e)
182 static inline void dm_consecutive_chunk_count_dec(struct dm_exception *e)
186 # endif
189 * Return the number of sectors in the device.
191 static inline sector_t get_dev_size(struct block_device *bdev)
193 return i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
196 static inline chunk_t sector_to_chunk(struct dm_exception_store *store,
197 sector_t sector)
199 return sector >> store->chunk_shift;
202 int dm_exception_store_type_register(struct dm_exception_store_type *type);
203 int dm_exception_store_type_unregister(struct dm_exception_store_type *type);
205 int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
206 unsigned chunk_size,
207 char **error);
209 int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
210 struct dm_snapshot *snap,
211 unsigned *args_used,
212 struct dm_exception_store **store);
213 void dm_exception_store_destroy(struct dm_exception_store *store);
215 int dm_exception_store_init(void);
216 void dm_exception_store_exit(void);
219 * Two exception store implementations.
221 int dm_persistent_snapshot_init(void);
222 void dm_persistent_snapshot_exit(void);
224 int dm_transient_snapshot_init(void);
225 void dm_transient_snapshot_exit(void);
227 #endif /* _LINUX_DM_EXCEPTION_STORE */