/* Copyright 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */
/* See http://www.namesys.com/cryptcompress_design.html */

#if !defined( __FS_REISER4_CRYPTCOMPRESS_H__ )
#define __FS_REISER4_CRYPTCOMPRESS_H__

#include "../../page_cache.h"
#include "../compress/compress.h"
#include "../crypto/cipher.h"

#include <linux/pagemap.h>

#define MIN_CLUSTER_SHIFT PAGE_CACHE_SHIFT
#define MAX_CLUSTER_SHIFT 16
#define MAX_CLUSTER_NRPAGES (1U << MAX_CLUSTER_SHIFT >> PAGE_CACHE_SHIFT)
#define DC_CHECKSUM_SIZE 4

#define MIN_LATTICE_FACTOR 1
#define MAX_LATTICE_FACTOR 32
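/*
 * Example (assuming 4K pages, i.e. PAGE_CACHE_SHIFT == 12): the largest
 * logical cluster is 1 << MAX_CLUSTER_SHIFT = 64K, so MAX_CLUSTER_NRPAGES
 * evaluates to 64K >> 12 = 16 pages.
 */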
/* this mask contains all non-standard plugins that might
   be present in reiser4-specific part of inode managed by
   cryptcompress file plugin */
#define cryptcompress_mask				\
	((1 << PSET_CLUSTER) |				\
	 (1 << PSET_CIPHER) |				\
	 (1 << PSET_DIGEST) |				\
	 (1 << PSET_COMPRESSION) |			\
	 (1 << PSET_COMPRESSION_MODE))
static inline int cluster_shift_ok(int shift)
{
	return (shift >= MIN_CLUSTER_SHIFT) && (shift <= MAX_CLUSTER_SHIFT);
}
#if REISER4_DEBUG
#define INODE_PGCOUNT(inode)						\
({									\
	assert("edward-1530", inode_file_plugin(inode) ==		\
	       file_plugin_by_id(CRYPTCOMPRESS_FILE_PLUGIN_ID));	\
	atomic_read(&cryptcompress_inode_data(inode)->pgcount);	\
})
#define INODE_PGCOUNT_INC(inode)					\
do {									\
	assert("edward-1531", inode_file_plugin(inode) ==		\
	       file_plugin_by_id(CRYPTCOMPRESS_FILE_PLUGIN_ID));	\
	atomic_inc(&cryptcompress_inode_data(inode)->pgcount);		\
} while (0)
#define INODE_PGCOUNT_DEC(inode)					\
do {									\
	if (inode_file_plugin(inode) ==					\
	    file_plugin_by_id(CRYPTCOMPRESS_FILE_PLUGIN_ID))		\
		atomic_dec(&cryptcompress_inode_data(inode)->pgcount);	\
} while (0)
#else
#define INODE_PGCOUNT(inode) (0)
#define INODE_PGCOUNT_INC(inode)
#define INODE_PGCOUNT_DEC(inode)
#endif /* REISER4_DEBUG */
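/*
 * In debug builds these counters track the number of pages grabbed for page
 * clusters of a cryptcompress inode, so leaks can be detected when the inode
 * is destroyed; in non-debug builds they compile away.
 */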
struct tfm_stream {
	__u8 *data;
	size_t size;
};

typedef enum {
	INPUT_STREAM,
	OUTPUT_STREAM,
	LAST_STREAM
} tfm_stream_id;

typedef struct tfm_stream * tfm_unit[LAST_STREAM];
static inline __u8 *ts_data(struct tfm_stream * stm)
{
	assert("edward-928", stm != NULL);
	return stm->data;
}

static inline size_t ts_size(struct tfm_stream * stm)
{
	assert("edward-929", stm != NULL);
	return stm->size;
}

static inline void set_ts_size(struct tfm_stream * stm, size_t size)
{
	assert("edward-930", stm != NULL);

	stm->size = size;
}
static inline int alloc_ts(struct tfm_stream ** stm)
{
	assert("edward-931", stm);
	assert("edward-932", *stm == NULL);

	*stm = kzalloc(sizeof(**stm), reiser4_ctx_gfp_mask_get());
	if (*stm == NULL)
		return -ENOMEM;
	return 0;
}

static inline void free_ts(struct tfm_stream * stm)
{
	assert("edward-933", !ts_data(stm));
	assert("edward-934", !ts_size(stm));
	kfree(stm);
}
static inline int alloc_ts_data(struct tfm_stream * stm, size_t size)
{
	assert("edward-935", !ts_data(stm));
	assert("edward-936", !ts_size(stm));
	assert("edward-937", size != 0);

	stm->data = reiser4_vmalloc(size);
	if (stm->data == NULL)
		return -ENOMEM;
	set_ts_size(stm, size);
	return 0;
}

static inline void free_ts_data(struct tfm_stream * stm)
{
	assert("edward-938", equi(ts_data(stm), ts_size(stm)));
	if (ts_data(stm))
		vfree(ts_data(stm));
	memset(stm, 0, sizeof *stm);
}
/* Write modes for item conversion in flush convert phase */
typedef enum {
	CRC_APPEND_ITEM = 1,
	CRC_OVERWRITE_ITEM = 2,
	CRC_CUT_ITEM = 3
} cryptcompress_write_mode_t;
typedef enum {
	LC_INVAL = 0,		/* invalid value */
	LC_APPOV = 1,		/* append and/or overwrite */
	LC_TRUNC = 2		/* truncate */
} logical_cluster_op;
/* Transform cluster.
 * Intermediate state between page cluster and disk cluster.
 * Used for data transforms (compression/encryption).
 */
struct tfm_cluster {
	coa_set coa;		/* compression algorithms info */
	tfm_unit tun;		/* plain and transformed streams */
	tfm_action act;		/* current transform action (read or write) */
	int uptodate;		/* set when the transform result is valid */
	int lsize;		/* number of bytes in logical cluster */
	int len;		/* length of the transform stream */
};
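/*
 * Rough sketch of how the streams are used on the write path (illustrative,
 * not a verbatim excerpt): the page cluster is copied into INPUT_STREAM
 * (lsize bytes of plain data), the compression/cipher transform writes its
 * result into OUTPUT_STREAM, and @len then holds the number of transformed
 * bytes that will make up the disk cluster.
 */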
static inline coa_t get_coa(struct tfm_cluster * tc, reiser4_compression_id id,
			    tfm_action act)
{
	return tc->coa[id][act];
}

static inline void set_coa(struct tfm_cluster * tc, reiser4_compression_id id,
			   tfm_action act, coa_t coa)
{
	tc->coa[id][act] = coa;
}
static inline int alloc_coa(struct tfm_cluster * tc, compression_plugin * cplug)
{
	coa_t coa;

	coa = cplug->alloc(tc->act);
	if (IS_ERR(coa))
		return PTR_ERR(coa);
	set_coa(tc, cplug->h.id, tc->act, coa);
	return 0;
}

static inline int
grab_coa(struct tfm_cluster * tc, compression_plugin * cplug)
{
	return (cplug->alloc && !get_coa(tc, cplug->h.id, tc->act) ?
		alloc_coa(tc, cplug) : 0);
}
static inline void free_coa_set(struct tfm_cluster * tc)
{
	tfm_action j;
	reiser4_compression_id i;
	compression_plugin *cplug;

	assert("edward-810", tc != NULL);

	for (j = 0; j < TFMA_LAST; j++)
		for (i = 0; i < LAST_COMPRESSION_ID; i++) {
			if (!get_coa(tc, i, j))
				continue;
			cplug = compression_plugin_by_id(i);
			assert("edward-812", cplug->free != NULL);
			cplug->free(get_coa(tc, i, j), j);
			set_coa(tc, i, j, 0);
		}
	return;
}
static inline struct tfm_stream * get_tfm_stream(struct tfm_cluster * tc,
						 tfm_stream_id id)
{
	return tc->tun[id];
}

static inline void set_tfm_stream(struct tfm_cluster * tc,
				  tfm_stream_id id, struct tfm_stream * ts)
{
	tc->tun[id] = ts;
}
static inline __u8 *tfm_stream_data(struct tfm_cluster * tc, tfm_stream_id id)
{
	return ts_data(get_tfm_stream(tc, id));
}

static inline void set_tfm_stream_data(struct tfm_cluster * tc,
					tfm_stream_id id, __u8 * data)
{
	get_tfm_stream(tc, id)->data = data;
}

static inline size_t tfm_stream_size(struct tfm_cluster * tc, tfm_stream_id id)
{
	return ts_size(get_tfm_stream(tc, id));
}
static inline void
set_tfm_stream_size(struct tfm_cluster * tc, tfm_stream_id id, size_t size)
{
	get_tfm_stream(tc, id)->size = size;
}
static inline int
alloc_tfm_stream(struct tfm_cluster * tc, size_t size, tfm_stream_id id)
{
	assert("edward-939", tc != NULL);
	assert("edward-940", !get_tfm_stream(tc, id));

	tc->tun[id] = kzalloc(sizeof(struct tfm_stream),
			      reiser4_ctx_gfp_mask_get());
	if (tc->tun[id] == NULL)
		return -ENOMEM;
	return alloc_ts_data(get_tfm_stream(tc, id), size);
}
static inline int
realloc_tfm_stream(struct tfm_cluster * tc, size_t size, tfm_stream_id id)
{
	assert("edward-941", tfm_stream_size(tc, id) < size);
	free_ts_data(get_tfm_stream(tc, id));
	return alloc_ts_data(get_tfm_stream(tc, id), size);
}
static inline void free_tfm_stream(struct tfm_cluster * tc, tfm_stream_id id)
{
	free_ts_data(get_tfm_stream(tc, id));
	free_ts(get_tfm_stream(tc, id));
	set_tfm_stream(tc, id, 0);
}
static inline unsigned coa_overrun(compression_plugin * cplug, int ilen)
{
	return (cplug->overrun != NULL ? cplug->overrun(ilen) : 0);
}
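/*
 * ->overrun() reports the worst-case expansion of @ilen input bytes, so a
 * stream that receives transform output is typically sized with this slack,
 * e.g. (illustrative only) lsize + coa_overrun(cplug, lsize) bytes.
 */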
static inline void free_tfm_unit(struct tfm_cluster * tc)
{
	tfm_stream_id id;

	for (id = 0; id < LAST_STREAM; id++) {
		if (!get_tfm_stream(tc, id))
			continue;
		free_tfm_stream(tc, id);
	}
}
static inline void put_tfm_cluster(struct tfm_cluster * tc)
{
	assert("edward-942", tc != NULL);
	free_coa_set(tc);
	free_tfm_unit(tc);
}
static inline int tfm_cluster_is_uptodate(struct tfm_cluster * tc)
{
	assert("edward-943", tc != NULL);
	assert("edward-944", tc->uptodate == 0 || tc->uptodate == 1);
	return (tc->uptodate == 1);
}
static inline void tfm_cluster_set_uptodate(struct tfm_cluster * tc)
{
	assert("edward-945", tc != NULL);
	assert("edward-946", tc->uptodate == 0 || tc->uptodate == 1);
	tc->uptodate = 1;
}

static inline void tfm_cluster_clr_uptodate(struct tfm_cluster * tc)
{
	assert("edward-947", tc != NULL);
	assert("edward-948", tc->uptodate == 0 || tc->uptodate == 1);
	tc->uptodate = 0;
}
static inline int tfm_stream_is_set(struct tfm_cluster * tc, tfm_stream_id id)
{
	return (get_tfm_stream(tc, id) &&
		tfm_stream_data(tc, id) && tfm_stream_size(tc, id));
}
static inline int tfm_cluster_is_set(struct tfm_cluster * tc)
{
	int i;

	for (i = 0; i < LAST_STREAM; i++)
		if (!tfm_stream_is_set(tc, i))
			return 0;
	return 1;
}
static inline void alternate_streams(struct tfm_cluster * tc)
{
	struct tfm_stream *tmp = get_tfm_stream(tc, INPUT_STREAM);

	set_tfm_stream(tc, INPUT_STREAM, get_tfm_stream(tc, OUTPUT_STREAM));
	set_tfm_stream(tc, OUTPUT_STREAM, tmp);
}
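/*
 * Swapping the streams lets the result of one transform be consumed as the
 * input of the next one (e.g. compression followed by encryption) without
 * copying the data: the caller alternates the streams and runs the next step.
 */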
/* Set of states to indicate a kind of data
 * that will be written to the window */
typedef enum {
	DATA_WINDOW,		/* user's data */
	HOLE_WINDOW		/* zeroes (written when a write starts from an
				 * offset > i_size) */
} window_stat;
/* Window (of logical cluster size) discretely sliding along a file.
 * It is used to locate the hole region in a logical cluster so that the hole
 * can be properly represented on disk.
 * We split a write to a cryptcompress file into writes to its logical
 * clusters. Before writing to a logical cluster we set a window, i.e.
 * calculate values of the following fields:
 */
struct reiser4_slide {
	unsigned off;		/* offset to write from */
	unsigned count;		/* number of bytes to write */
	unsigned delta;		/* number of bytes to append to the hole */
	window_stat stat;	/* what kind of data will be written starting
				   from @off */
};
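/*
 * Illustration (numbers are made up): with a 16K logical cluster whose data
 * currently ends at byte 1000, a write starting at byte 5000 of that cluster
 * is preceded by a HOLE_WINDOW with off == 1000 and count == 4000 (the zeroes
 * to materialize), followed by a DATA_WINDOW describing the user's bytes.
 */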
/* Possible states of a disk cluster */
typedef enum {
	INVAL_DISK_CLUSTER,	/* unknown state */
	PREP_DISK_CLUSTER,	/* disk cluster got converted by flush
				 * at least 1 time */
	UNPR_DISK_CLUSTER,	/* disk cluster just created and should be
				 * converted by flush */
	FAKE_DISK_CLUSTER,	/* disk cluster exists neither in memory
				 * nor on disk */
	TRNC_DISK_CLUSTER	/* disk cluster is partially truncated */
} disk_cluster_stat;
/* The following structure represents various stages of the same logical
 * cluster of index @index:
 * . page cluster       (stage in primary cache)
 * . transform cluster  (transition stage)
 * . disk cluster       (stage in secondary cache)
 * This structure is used in transition and synchronizing operations, e.g.
 * transform cluster is a transition state when synchronizing page cluster
 * and disk cluster.
 * FIXME: Encapsulate page cluster, disk cluster.
 */
struct cluster_handle {
	cloff_t index;		 /* offset in a file (unit is a cluster size) */
	int index_valid;	 /* for validating the index above, if needed */
	struct file *file;	 /* host file */

	/* logical cluster */
	struct reiser4_slide *win; /* sliding window to locate holes */
	logical_cluster_op op;	 /* logical cluster operation (truncate or
				    append/overwrite) */
	/* transform cluster */
	struct tfm_cluster tc;	 /* contains all needed info to synchronize
				    page cluster and disk cluster */

	/* page cluster */
	int nr_pages;		 /* number of pages of current checkin action */
	int old_nrpages;	 /* number of pages of last checkin action */
	struct page **pages;	 /* attached pages */
	jnode * node;		 /* jnode for capture */

	/* disk cluster */
	hint_t *hint;		 /* current position in the tree */
	disk_cluster_stat dstat; /* state of the current disk cluster */
	int reserved;		 /* is space for disk cluster reserved */
#if REISER4_DEBUG
	reiser4_context *ctx;
	int reserved_prepped;
	int reserved_unprepped;
#endif
};
static inline __u8 * tfm_input_data (struct cluster_handle * clust)
{
	return tfm_stream_data(&clust->tc, INPUT_STREAM);
}

static inline __u8 * tfm_output_data (struct cluster_handle * clust)
{
	return tfm_stream_data(&clust->tc, OUTPUT_STREAM);
}
static inline int reset_cluster_pgset(struct cluster_handle * clust,
				      int nrpages)
{
	assert("edward-1057", clust->pages != NULL);
	memset(clust->pages, 0, sizeof(*clust->pages) * nrpages);
	return 0;
}

static inline int alloc_cluster_pgset(struct cluster_handle * clust,
				      int nrpages)
{
	assert("edward-949", clust != NULL);
	assert("edward-1362", clust->pages == NULL);
	assert("edward-950", nrpages != 0 && nrpages <= MAX_CLUSTER_NRPAGES);

	clust->pages = kzalloc(sizeof(*clust->pages) * nrpages,
			       reiser4_ctx_gfp_mask_get());
	if (clust->pages == NULL)
		return RETERR(-ENOMEM);
	return 0;
}
static inline void move_cluster_pgset(struct cluster_handle *clust,
				      struct page ***pages, int * nr_pages)
{
	assert("edward-1545", clust != NULL && clust->pages != NULL);
	assert("edward-1546", pages != NULL && *pages == NULL);

	*pages = clust->pages;
	*nr_pages = clust->nr_pages;
	clust->pages = NULL;
}
static inline void free_cluster_pgset(struct cluster_handle * clust)
{
	assert("edward-951", clust->pages != NULL);
	kfree(clust->pages);
}
static inline void put_cluster_handle(struct cluster_handle * clust)
{
	assert("edward-435", clust != NULL);

	put_tfm_cluster(&clust->tc);
	if (clust->pages)
		free_cluster_pgset(clust);
	memset(clust, 0, sizeof *clust);
}
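/*
 * Illustrative lifecycle of a cluster handle (variable names are hypothetical;
 * this is not a verbatim excerpt from the file plugin):
 *
 *	struct cluster_handle clust;
 *	int ret;
 *
 *	memset(&clust, 0, sizeof(clust));
 *	ret = alloc_cluster_pgset(&clust, MAX_CLUSTER_NRPAGES);
 *	if (ret)
 *		return ret;
 *	... grab pages, fill the transform cluster, capture the jnode ...
 *	put_cluster_handle(&clust);
 */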
static inline void inc_keyload_count(struct reiser4_crypto_info * data)
{
	assert("edward-1410", data != NULL);
	data->keyload_count++;
}

static inline void dec_keyload_count(struct reiser4_crypto_info * data)
{
	assert("edward-1411", data != NULL);
	assert("edward-1412", data->keyload_count > 0);
	data->keyload_count--;
}

static inline int capture_cluster_jnode(jnode * node)
{
	return reiser4_try_capture(node, ZNODE_WRITE_LOCK, 0);
}
/* cryptcompress specific part of reiser4_inode */
struct cryptcompress_info {
	struct mutex checkin_mutex;  /* This is to serialize
				      * checkin_logical_cluster operations */
	cloff_t trunc_index;	     /* Index of the leftmost truncated disk
				      * cluster (to resolve races with read) */
	struct reiser4_crypto_info *crypt;
	/*
	 * the following 2 fields are controlled by compression mode plugin
	 */
	int compress_toggle;	     /* Current status of compressibility */
	int lattice_factor;	     /* Factor of dynamic lattice. FIXME: Have
				      * a compression_toggle to keep the factor */
#if REISER4_DEBUG
	atomic_t pgcount;	     /* number of grabbed pages */
#endif
};
static inline void set_compression_toggle (struct cryptcompress_info * info, int val)
{
	info->compress_toggle = val;
}

static inline int get_compression_toggle (struct cryptcompress_info * info)
{
	return info->compress_toggle;
}

static inline int compression_is_on(struct cryptcompress_info * info)
{
	return get_compression_toggle(info) == 1;
}

static inline void turn_on_compression(struct cryptcompress_info * info)
{
	set_compression_toggle(info, 1);
}

static inline void turn_off_compression(struct cryptcompress_info * info)
{
	set_compression_toggle(info, 0);
}

static inline void set_lattice_factor(struct cryptcompress_info * info, int val)
{
	info->lattice_factor = val;
}

static inline int get_lattice_factor(struct cryptcompress_info * info)
{
	return info->lattice_factor;
}
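/*
 * Broadly speaking, a compression mode plugin flips compress_toggle off when
 * clusters stop compressing well and uses lattice_factor to decide how often
 * to re-test compressibility afterwards; see the compression mode plugins for
 * the exact policy.
 */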
struct cryptcompress_info *cryptcompress_inode_data(const struct inode *);
int equal_to_rdk(znode *, const reiser4_key *);
int goto_right_neighbor(coord_t *, lock_handle *);
int cryptcompress_inode_ok(struct inode *inode);
int coord_is_unprepped_ctail(const coord_t * coord);
extern int do_readpage_ctail(struct inode *, struct cluster_handle *,
			     struct page * page, znode_lock_mode mode);
extern int ctail_insert_unprepped_cluster(struct cluster_handle * clust,
					  struct inode * inode);
extern int readpages_cryptcompress(struct file *, struct address_space *,
				   struct list_head *, unsigned);
int bind_cryptcompress(struct inode *child, struct inode *parent);
void destroy_inode_cryptcompress(struct inode * inode);
int grab_page_cluster(struct inode *inode, struct cluster_handle * clust,
		      rw_op rw);
int write_dispatch_hook(struct file *file, struct inode * inode,
			loff_t pos, struct cluster_handle * clust,
			struct dispatch_context * cont);
int setattr_dispatch_hook(struct inode * inode);
struct reiser4_crypto_info * inode_crypto_info(struct inode * inode);
void inherit_crypto_info_common(struct inode * parent, struct inode * object,
				int (*can_inherit)(struct inode * child,
						   struct inode * parent));
void reiser4_attach_crypto_info(struct inode * inode,
				struct reiser4_crypto_info * info);
void change_crypto_info(struct inode * inode, struct reiser4_crypto_info * new);
struct reiser4_crypto_info * reiser4_alloc_crypto_info (struct inode * inode);
static inline struct crypto_blkcipher * info_get_cipher(struct reiser4_crypto_info * info)
{
	return info->cipher;
}

static inline void info_set_cipher(struct reiser4_crypto_info * info,
				   struct crypto_blkcipher * tfm)
{
	info->cipher = tfm;
}

static inline struct crypto_hash * info_get_digest(struct reiser4_crypto_info * info)
{
	return info->digest;
}

static inline void info_set_digest(struct reiser4_crypto_info * info,
				   struct crypto_hash * tfm)
{
	info->digest = tfm;
}
static inline void put_cluster_page(struct page * page)
{
	page_cache_release(page);
}
#endif				/* __FS_REISER4_CRYPTCOMPRESS_H__ */

/*
   Local variables:
   c-indentation-style: "K&R"
   End:
*/