fs/reiser4/plugin/file/cryptcompress.h
/* Copyright 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */
/* See http://www.namesys.com/cryptcompress_design.html */

#if !defined( __FS_REISER4_CRYPTCOMPRESS_H__ )
#define __FS_REISER4_CRYPTCOMPRESS_H__

#include "../../page_cache.h"
#include "../compress/compress.h"
#include "../crypto/cipher.h"

#include <linux/pagemap.h>

#define MIN_CLUSTER_SHIFT PAGE_CACHE_SHIFT
#define MAX_CLUSTER_SHIFT 16
#define MAX_CLUSTER_NRPAGES (1U << MAX_CLUSTER_SHIFT >> PAGE_CACHE_SHIFT)
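/*
 * Worked example (assuming 4K pages, i.e. PAGE_CACHE_SHIFT == 12):
 * cluster shifts then range from 12 to 16, so a logical cluster is
 * 4K..64K, and MAX_CLUSTER_NRPAGES = (1U << 16) >> 12 = 16 pages.
 */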
#define DC_CHECKSUM_SIZE 4

#define MIN_LATTICE_FACTOR 1
#define MAX_LATTICE_FACTOR 32

/* this mask contains all non-standard plugins that might
   be present in reiser4-specific part of inode managed by
   cryptcompress file plugin */
#define cryptcompress_mask                              \
        ((1 << PSET_FILE) |                             \
         (1 << PSET_CLUSTER) |                          \
         (1 << PSET_CIPHER) |                           \
         (1 << PSET_DIGEST) |                           \
         (1 << PSET_COMPRESSION) |                      \
         (1 << PSET_COMPRESSION_MODE))

#if REISER4_DEBUG
static inline int cluster_shift_ok(int shift)
{
        return (shift >= MIN_CLUSTER_SHIFT) && (shift <= MAX_CLUSTER_SHIFT);
}
#endif

#if REISER4_DEBUG
#define INODE_PGCOUNT(inode)                                            \
({                                                                      \
        assert("edward-1530", inode_file_plugin(inode) ==               \
               file_plugin_by_id(CRYPTCOMPRESS_FILE_PLUGIN_ID));        \
        atomic_read(&cryptcompress_inode_data(inode)->pgcount);         \
})
#define INODE_PGCOUNT_INC(inode)                                        \
do {                                                                    \
        assert("edward-1531", inode_file_plugin(inode) ==               \
               file_plugin_by_id(CRYPTCOMPRESS_FILE_PLUGIN_ID));        \
        atomic_inc(&cryptcompress_inode_data(inode)->pgcount);          \
} while (0)
#define INODE_PGCOUNT_DEC(inode)                                        \
do {                                                                    \
        if (inode_file_plugin(inode) ==                                 \
            file_plugin_by_id(CRYPTCOMPRESS_FILE_PLUGIN_ID))            \
                atomic_dec(&cryptcompress_inode_data(inode)->pgcount);  \
} while (0)
#else
#define INODE_PGCOUNT(inode) (0)
#define INODE_PGCOUNT_INC(inode)
#define INODE_PGCOUNT_DEC(inode)
#endif /* REISER4_DEBUG */

struct tfm_stream {
        __u8 *data;
        size_t size;
};

typedef enum {
        INPUT_STREAM,
        OUTPUT_STREAM,
        LAST_STREAM
} tfm_stream_id;

typedef struct tfm_stream * tfm_unit[LAST_STREAM];

static inline __u8 *ts_data(struct tfm_stream * stm)
{
        assert("edward-928", stm != NULL);
        return stm->data;
}

static inline size_t ts_size(struct tfm_stream * stm)
{
        assert("edward-929", stm != NULL);
        return stm->size;
}

static inline void set_ts_size(struct tfm_stream * stm, size_t size)
{
        assert("edward-930", stm != NULL);

        stm->size = size;
}

static inline int alloc_ts(struct tfm_stream ** stm)
{
        assert("edward-931", stm);
        assert("edward-932", *stm == NULL);

        *stm = kzalloc(sizeof(**stm), reiser4_ctx_gfp_mask_get());
        if (!*stm)
                return -ENOMEM;
        return 0;
}

static inline void free_ts(struct tfm_stream * stm)
{
        assert("edward-933", !ts_data(stm));
        assert("edward-934", !ts_size(stm));

        kfree(stm);
}

static inline int alloc_ts_data(struct tfm_stream * stm, size_t size)
{
        assert("edward-935", !ts_data(stm));
        assert("edward-936", !ts_size(stm));
        assert("edward-937", size != 0);

        stm->data = reiser4_vmalloc(size);
        if (!stm->data)
                return -ENOMEM;
        set_ts_size(stm, size);
        return 0;
}

static inline void free_ts_data(struct tfm_stream * stm)
{
        assert("edward-938", equi(ts_data(stm), ts_size(stm)));

        if (ts_data(stm))
                vfree(ts_data(stm));
        memset(stm, 0, sizeof *stm);
}
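
/*
 * Illustrative lifecycle of a tfm_stream (sketch only; error handling
 * and the hypothetical buffer size "size" are elided):
 *
 *      struct tfm_stream *stm = NULL;
 *
 *      alloc_ts(&stm);            // allocate the stream descriptor
 *      alloc_ts_data(stm, size);  // allocate the data buffer
 *      ...                        // use ts_data(stm) / ts_size(stm)
 *      free_ts_data(stm);         // release buffer, zero the descriptor
 *      free_ts(stm);              // release the descriptor itself
 */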

/* Write modes for item conversion in flush convert phase */
typedef enum {
        CRC_APPEND_ITEM = 1,
        CRC_OVERWRITE_ITEM = 2,
        CRC_CUT_ITEM = 3
} cryptcompress_write_mode_t;

typedef enum {
        LC_INVAL = 0,   /* invalid value */
        LC_APPOV = 1,   /* append and/or overwrite */
        LC_TRUNC = 2    /* truncate */
} logical_cluster_op;

/* Transform cluster.
 * Intermediate state between page cluster and disk cluster.
 * Is used for data transform (compression/encryption).
 */
struct tfm_cluster {
        coa_set coa;    /* compression algorithms info */
        tfm_unit tun;   /* plain and transformed streams */
        tfm_action act;
        int uptodate;
        int lsize;      /* number of bytes in logical cluster */
        int len;        /* length of the transform stream */
};

static inline coa_t get_coa(struct tfm_cluster * tc, reiser4_compression_id id,
                            tfm_action act)
{
        return tc->coa[id][act];
}

static inline void set_coa(struct tfm_cluster * tc, reiser4_compression_id id,
                           tfm_action act, coa_t coa)
{
        tc->coa[id][act] = coa;
}

static inline int alloc_coa(struct tfm_cluster * tc, compression_plugin * cplug)
{
        coa_t coa;

        coa = cplug->alloc(tc->act);
        if (IS_ERR(coa))
                return PTR_ERR(coa);
        set_coa(tc, cplug->h.id, tc->act, coa);
        return 0;
}

static inline int
grab_coa(struct tfm_cluster * tc, compression_plugin * cplug)
{
        return (cplug->alloc && !get_coa(tc, cplug->h.id, tc->act) ?
                alloc_coa(tc, cplug) : 0);
}
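
/*
 * A coa (per-algorithm transform workspace) is allocated lazily: callers
 * typically grab_coa() before a transform, so that only plugins which
 * provide an ->alloc() hook get a private workspace, and the whole set is
 * released later via free_coa_set() (called from put_tfm_cluster() below).
 */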

static inline void free_coa_set(struct tfm_cluster * tc)
{
        tfm_action j;
        reiser4_compression_id i;
        compression_plugin *cplug;

        assert("edward-810", tc != NULL);

        for (j = 0; j < TFMA_LAST; j++)
                for (i = 0; i < LAST_COMPRESSION_ID; i++) {
                        if (!get_coa(tc, i, j))
                                continue;
                        cplug = compression_plugin_by_id(i);
                        assert("edward-812", cplug->free != NULL);
                        cplug->free(get_coa(tc, i, j), j);
                        set_coa(tc, i, j, 0);
                }
        return;
}

static inline struct tfm_stream * get_tfm_stream(struct tfm_cluster * tc,
                                                 tfm_stream_id id)
{
        return tc->tun[id];
}

static inline void set_tfm_stream(struct tfm_cluster * tc,
                                  tfm_stream_id id, struct tfm_stream * ts)
{
        tc->tun[id] = ts;
}

static inline __u8 *tfm_stream_data(struct tfm_cluster * tc, tfm_stream_id id)
{
        return ts_data(get_tfm_stream(tc, id));
}

static inline void set_tfm_stream_data(struct tfm_cluster * tc,
                                       tfm_stream_id id, __u8 * data)
{
        get_tfm_stream(tc, id)->data = data;
}

static inline size_t tfm_stream_size(struct tfm_cluster * tc, tfm_stream_id id)
{
        return ts_size(get_tfm_stream(tc, id));
}

static inline void
set_tfm_stream_size(struct tfm_cluster * tc, tfm_stream_id id, size_t size)
{
        get_tfm_stream(tc, id)->size = size;
}

static inline int
alloc_tfm_stream(struct tfm_cluster * tc, size_t size, tfm_stream_id id)
{
        assert("edward-939", tc != NULL);
        assert("edward-940", !get_tfm_stream(tc, id));

        tc->tun[id] = kzalloc(sizeof(struct tfm_stream),
                              reiser4_ctx_gfp_mask_get());
        if (!tc->tun[id])
                return -ENOMEM;
        return alloc_ts_data(get_tfm_stream(tc, id), size);
}

static inline int
realloc_tfm_stream(struct tfm_cluster * tc, size_t size, tfm_stream_id id)
{
        assert("edward-941", tfm_stream_size(tc, id) < size);
        free_ts_data(get_tfm_stream(tc, id));
        return alloc_ts_data(get_tfm_stream(tc, id), size);
}

static inline void free_tfm_stream(struct tfm_cluster * tc, tfm_stream_id id)
{
        free_ts_data(get_tfm_stream(tc, id));
        free_ts(get_tfm_stream(tc, id));
        set_tfm_stream(tc, id, 0);
}

static inline unsigned coa_overrun(compression_plugin * cplug, int ilen)
{
        return (cplug->overrun != NULL ? cplug->overrun(ilen) : 0);
}

static inline void free_tfm_unit(struct tfm_cluster * tc)
{
        tfm_stream_id id;
        for (id = 0; id < LAST_STREAM; id++) {
                if (!get_tfm_stream(tc, id))
                        continue;
                free_tfm_stream(tc, id);
        }
}

static inline void put_tfm_cluster(struct tfm_cluster * tc)
{
        assert("edward-942", tc != NULL);
        free_coa_set(tc);
        free_tfm_unit(tc);
}

static inline int tfm_cluster_is_uptodate(struct tfm_cluster * tc)
{
        assert("edward-943", tc != NULL);
        assert("edward-944", tc->uptodate == 0 || tc->uptodate == 1);
        return (tc->uptodate == 1);
}

static inline void tfm_cluster_set_uptodate(struct tfm_cluster * tc)
{
        assert("edward-945", tc != NULL);
        assert("edward-946", tc->uptodate == 0 || tc->uptodate == 1);
        tc->uptodate = 1;
        return;
}

static inline void tfm_cluster_clr_uptodate(struct tfm_cluster * tc)
{
        assert("edward-947", tc != NULL);
        assert("edward-948", tc->uptodate == 0 || tc->uptodate == 1);
        tc->uptodate = 0;
        return;
}

static inline int tfm_stream_is_set(struct tfm_cluster * tc, tfm_stream_id id)
{
        return (get_tfm_stream(tc, id) &&
                tfm_stream_data(tc, id) && tfm_stream_size(tc, id));
}

static inline int tfm_cluster_is_set(struct tfm_cluster * tc)
{
        int i;
        for (i = 0; i < LAST_STREAM; i++)
                if (!tfm_stream_is_set(tc, i))
                        return 0;
        return 1;
}

static inline void alternate_streams(struct tfm_cluster * tc)
{
        struct tfm_stream *tmp = get_tfm_stream(tc, INPUT_STREAM);

        set_tfm_stream(tc, INPUT_STREAM, get_tfm_stream(tc, OUTPUT_STREAM));
        set_tfm_stream(tc, OUTPUT_STREAM, tmp);
}
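
/*
 * Typical use of the stream pair (illustrative sketch only): a transform
 * reads from INPUT_STREAM and writes to OUTPUT_STREAM; when transforms are
 * chained (e.g. compression followed by encryption), the output of one
 * stage becomes the input of the next by calling alternate_streams(tc)
 * between the stages.
 */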

/* Set of states to indicate a kind of data
 * that will be written to the window */
typedef enum {
        DATA_WINDOW,    /* user's data */
        HOLE_WINDOW     /* zeroes (such kind of data can be written
                         * if we start to write from offset > i_size) */
} window_stat;

/* Window (of logical cluster size) discretely sliding along a file.
 * Is used to locate the hole region in a logical cluster to be properly
 * represented on disk.
 * We split a write to a cryptcompress file into writes to its logical
 * clusters. Before writing to a logical cluster we set a window, i.e.
 * calculate values of the following fields:
 */
struct reiser4_slide {
        unsigned off;           /* offset to write from */
        unsigned count;         /* number of bytes to write */
        unsigned delta;         /* number of bytes to append to the hole */
        window_stat stat;       /* what kind of data will be written starting
                                   from @off */
};
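
/*
 * Illustrative example (numbers are hypothetical): with a 16K logical
 * cluster, i_size ending 4000 bytes into the cluster, and a write starting
 * 10000 bytes into it, the 6000-byte gap [4000, 10000) is first covered by
 * a HOLE_WINDOW (so it gets represented as zeroes), and the user's bytes
 * are then written under a DATA_WINDOW.
 */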

/* Possible states of a disk cluster */
typedef enum {
        INVAL_DISK_CLUSTER,     /* unknown state */
        PREP_DISK_CLUSTER,      /* disk cluster got converted by flush
                                 * at least 1 time */
        UNPR_DISK_CLUSTER,      /* disk cluster just created and should be
                                 * converted by flush */
        FAKE_DISK_CLUSTER,      /* disk cluster doesn't exist either in memory
                                 * or on disk */
        TRNC_DISK_CLUSTER       /* disk cluster is partially truncated */
} disk_cluster_stat;

/* The following structure represents various stages of the same logical
 * cluster of index @index:
 * . fixed slide
 * . page cluster (stage in primary cache)
 * . transform cluster (transition stage)
 * . disk cluster (stage in secondary cache)
 * This structure is used in transition and synchronizing operations, e.g.
 * transform cluster is a transition state when synchronizing page cluster
 * and disk cluster.
 * FIXME: Encapsulate page cluster, disk cluster.
 */
struct cluster_handle {
        cloff_t index;           /* offset in a file (unit is a cluster size) */
        int index_valid;         /* for validating the index above, if needed */
        struct file *file;       /* host file */

        /* logical cluster */
        struct reiser4_slide *win; /* sliding window to locate holes */
        logical_cluster_op op;   /* logical cluster operation (truncate or
                                    append/overwrite) */
        /* transform cluster */
        struct tfm_cluster tc;   /* contains all needed info to synchronize
                                    page cluster and disk cluster */
        /* page cluster */
        int nr_pages;            /* number of pages of current checkin action */
        int old_nrpages;         /* number of pages of last checkin action */
        struct page **pages;     /* attached pages */
        jnode * node;            /* jnode for capture */

        /* disk cluster */
        hint_t *hint;            /* current position in the tree */
        disk_cluster_stat dstat; /* state of the current disk cluster */
        int reserved;            /* is space for disk cluster reserved */
#if REISER4_DEBUG
        reiser4_context *ctx;
        int reserved_prepped;
        int reserved_unprepped;
#endif
};

static inline __u8 * tfm_input_data (struct cluster_handle * clust)
{
        return tfm_stream_data(&clust->tc, INPUT_STREAM);
}

static inline __u8 * tfm_output_data (struct cluster_handle * clust)
{
        return tfm_stream_data(&clust->tc, OUTPUT_STREAM);
}

static inline int reset_cluster_pgset(struct cluster_handle * clust,
                                      int nrpages)
{
        assert("edward-1057", clust->pages != NULL);
        memset(clust->pages, 0, sizeof(*clust->pages) * nrpages);
        return 0;
}

static inline int alloc_cluster_pgset(struct cluster_handle * clust,
                                      int nrpages)
{
        assert("edward-949", clust != NULL);
        assert("edward-1362", clust->pages == NULL);
        assert("edward-950", nrpages != 0 && nrpages <= MAX_CLUSTER_NRPAGES);

        clust->pages = kzalloc(sizeof(*clust->pages) * nrpages,
                               reiser4_ctx_gfp_mask_get());
        if (!clust->pages)
                return RETERR(-ENOMEM);
        return 0;
}

static inline void free_cluster_pgset(struct cluster_handle * clust)
{
        assert("edward-951", clust->pages != NULL);
        kfree(clust->pages);
        clust->pages = NULL;
}

static inline void put_cluster_handle(struct cluster_handle * clust)
{
        assert("edward-435", clust != NULL);

        put_tfm_cluster(&clust->tc);
        if (clust->pages)
                free_cluster_pgset(clust);
        memset(clust, 0, sizeof *clust);
}
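
/*
 * Sketch of the page-set part of a cluster_handle's lifecycle (illustrative
 * only; error handling elided, "nrpages" is a hypothetical page count):
 *
 *      struct cluster_handle clust;
 *
 *      memset(&clust, 0, sizeof clust);
 *      alloc_cluster_pgset(&clust, nrpages);   // attach the page array
 *      ...                                     // grab pages, run transforms
 *      put_cluster_handle(&clust);             // frees pages and tfm cluster
 */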

static inline void inc_keyload_count(struct reiser4_crypto_info * data)
{
        assert("edward-1410", data != NULL);
        data->keyload_count++;
}

static inline void dec_keyload_count(struct reiser4_crypto_info * data)
{
        assert("edward-1411", data != NULL);
        assert("edward-1412", data->keyload_count > 0);
        data->keyload_count--;
}

static inline int capture_cluster_jnode(jnode * node)
{
        return reiser4_try_capture(node, ZNODE_WRITE_LOCK, 0);
}

/* cryptcompress specific part of reiser4_inode */
struct cryptcompress_info {
        struct mutex checkin_mutex;  /* This is to serialize
                                      * checkin_logical_cluster operations */
        cloff_t trunc_index;         /* Index of the leftmost truncated disk
                                      * cluster (to resolve races with read) */
        struct reiser4_crypto_info *crypt;
        /*
         * the following 2 fields are controlled by compression mode plugin
         */
        int compress_toggle;         /* Current status of compressibility */
        int lattice_factor;          /* Factor of dynamic lattice. FIXME: Have
                                      * a compression_toggle to keep the factor
                                      * up to date when degrading quality */
#if REISER4_DEBUG
        atomic_t pgcount;            /* number of grabbed pages */
#endif
};

static inline void set_compression_toggle (struct cryptcompress_info * info, int val)
{
        info->compress_toggle = val;
}

static inline int get_compression_toggle (struct cryptcompress_info * info)
{
        return info->compress_toggle;
}

static inline int compression_is_on(struct cryptcompress_info * info)
{
        return get_compression_toggle(info) == 1;
}

static inline void turn_on_compression(struct cryptcompress_info * info)
{
        set_compression_toggle(info, 1);
}

static inline void turn_off_compression(struct cryptcompress_info * info)
{
        set_compression_toggle(info, 0);
}
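
/*
 * Illustrative use (sketch only): a compression mode plugin may call
 * turn_off_compression(info) after observing poorly compressible clusters
 * and turn_on_compression(info) again later, using lattice_factor (see the
 * helpers below) to decide how often compression should be retried.
 */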

static inline void set_lattice_factor(struct cryptcompress_info * info, int val)
{
        info->lattice_factor = val;
}

static inline int get_lattice_factor(struct cryptcompress_info * info)
{
        return info->lattice_factor;
}

struct cryptcompress_info *cryptcompress_inode_data(const struct inode *);
int equal_to_rdk(znode *, const reiser4_key *);
int goto_right_neighbor(coord_t *, lock_handle *);
int cryptcompress_inode_ok(struct inode *inode);
int coord_is_unprepped_ctail(const coord_t * coord);
extern int do_readpage_ctail(struct inode *, struct cluster_handle *,
                             struct page * page, znode_lock_mode mode);
extern int ctail_insert_unprepped_cluster(struct cluster_handle * clust,
                                          struct inode * inode);
extern int readpages_cryptcompress(struct file*, struct address_space*,
                                   struct list_head*, unsigned);
int bind_cryptcompress(struct inode *child, struct inode *parent);
void destroy_inode_cryptcompress(struct inode * inode);
int grab_page_cluster(struct inode *inode, struct cluster_handle * clust,
                      rw_op rw);
int write_conversion_hook(struct file *file, struct inode * inode, loff_t pos,
                          struct cluster_handle * clust, int * progress);
struct reiser4_crypto_info * inode_crypto_info(struct inode * inode);
void inherit_crypto_info_common(struct inode * parent, struct inode * object,
                                int (*can_inherit)(struct inode * child,
                                                   struct inode * parent));
void reiser4_attach_crypto_info(struct inode * inode,
                                struct reiser4_crypto_info * info);
void change_crypto_info(struct inode * inode, struct reiser4_crypto_info * new);
struct reiser4_crypto_info * reiser4_alloc_crypto_info (struct inode * inode);

static inline struct crypto_blkcipher * info_get_cipher(struct reiser4_crypto_info * info)
{
        return info->cipher;
}

static inline void info_set_cipher(struct reiser4_crypto_info * info,
                                   struct crypto_blkcipher * tfm)
{
        info->cipher = tfm;
}

static inline struct crypto_hash * info_get_digest(struct reiser4_crypto_info * info)
{
        return info->digest;
}

static inline void info_set_digest(struct reiser4_crypto_info * info,
                                   struct crypto_hash * tfm)
{
        info->digest = tfm;
}

static inline void put_cluster_page(struct page * page)
{
        page_cache_release(page);
}

#endif /* __FS_REISER4_CRYPTCOMPRESS_H__ */

/* Make Linus happy.
   Local variables:
   c-indentation-style: "K&R"
   mode-name: "LC"
   c-basic-offset: 8
   tab-width: 8
   fill-column: 120
   scroll-step: 1
   End:
*/