/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by
   reiser4/README */

/* Functions and data types to "carry" tree modification(s) upward.
   See fs/reiser4/carry.c for details. */

#if !defined(__FS_REISER4_CARRY_H__)
#define __FS_REISER4_CARRY_H__

#include "forward.h"
#include "debug.h"
#include "pool.h"
#include "znode.h"

#include <linux/types.h>
/* &carry_node - "location" of carry node.

   "Location" of a node that is involved, or is going to be involved, in the
   carry process. The node to which an operation will be carried on the
   parent level cannot be recorded explicitly: the operation is usually
   carried to the parent of some node (where changes are performed at the
   current level), or to the left neighbor of that parent. But while
   modifications are performed at the current level, the parent may change.
   So we have to allow some indirection (or, positively, flexibility) in
   locating carry nodes.
*/
typedef struct carry_node {
	/* pool linkage */
	struct reiser4_pool_header header;

	/* base node from which real_node is calculated. See
	   fs/reiser4/carry.c:lock_carry_node(). */
	znode *node;

	/* how to get ->real_node */
	/* to get ->real_node obtain parent of ->node */
	__u32 parent:1;
	/* to get ->real_node obtain left neighbor of parent of
	   ->node */
	__u32 left:1;
	__u32 left_before:1;

	/* locking */

	/* this node was locked by carry process and should be
	   unlocked when carry leaves a level */
	__u32 unlock:1;

	/* disk block for this node was allocated by carry process and
	   should be deallocated when carry leaves a level */
	__u32 deallocate:1;
	/* this carry node was allocated by carry process and should be
	   freed when carry leaves a level */
	__u32 free:1;

	/* type of lock we want to take on this node */
	lock_handle lock_handle;
} carry_node;
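
/*
 * Illustrative sketch (not part of the original header): how the location
 * bits above are meant to be read.  The helper name is hypothetical; the
 * real resolution is done by fs/reiser4/carry.c:lock_carry_node() and
 * reiser4_carry_real().
 */
#if 0
static void carry_node_location_example(carry_node *cn, znode *child)
{
	cn->node = child;	/* base node at the current level */
	cn->parent = 1;		/* real node is the parent of ->node ... */
	cn->left = 0;		/* ... or, with ->left set, the left
				 * neighbor of that parent */
}
#endif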
/* &carry_opcode - elementary operations that can be carried upward

   Operations that carry() can handle. This list is supposed to be
   expanded.

   Each carry operation (cop) is handled by the appropriate function defined
   in fs/reiser4/carry.c. For example, COP_INSERT is handled by
   fs/reiser4/carry.c:carry_insert(), etc. These functions in turn
   call plugins of the nodes affected by the operation to modify the nodes'
   content and to gather operations to be performed on the next level.
*/
typedef enum {
	/* insert new item into node. */
	COP_INSERT,
	/* delete pointer from parent node */
	COP_DELETE,
	/* remove part of or whole node. */
	COP_CUT,
	/* increase size of item. */
	COP_PASTE,
	/* insert extent (that is, sequence of unformatted nodes). */
	COP_EXTENT,
	/* update delimiting key in least common ancestor of two
	   nodes. This is performed when items are moved between two
	   nodes. */
	COP_UPDATE,
	/* insert flow */
	COP_INSERT_FLOW,
	COP_LAST_OP,
} carry_opcode;
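
/*
 * Illustrative sketch (hypothetical, not the actual table in carry.c): the
 * per-opcode dispatch described above can be pictured as an opcode-indexed
 * array of handlers.  The handler type and the carry_delete() name below
 * are assumptions; only carry_insert() is named by the comment above.
 */
#if 0
typedef int (*carry_op_handler_example)(carry_op *op, carry_level *doing,
					carry_level *todo);

static carry_op_handler_example op_dispatch_example[COP_LAST_OP] = {
	[COP_INSERT] = carry_insert,	/* see fs/reiser4/carry.c */
	[COP_DELETE] = carry_delete,	/* hypothetical name */
	/* ... one handler per opcode ... */
};
#endif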
#define CARRY_FLOW_NEW_NODES_LIMIT 20

/* mode (or subtype) of COP_{INSERT|PASTE} operation. Specifies how target
   item is determined. */
typedef enum {
	/* target item is one containing pointer to the ->child node */
	COPT_CHILD,
	/* target item is given explicitly by @coord */
	COPT_ITEM_DATA,
	/* target item is given by key */
	COPT_KEY,
	/* see insert_paste_common() for more comments on this. */
	COPT_PASTE_RESTARTED,
} cop_insert_pos_type;
/* flags to cut and delete */
typedef enum {
	/* don't kill node even if it becomes completely empty as a result of
	 * the cut. This is needed for eottl handling. See carry_extent() for
	 * details. */
	DELETE_RETAIN_EMPTY = (1 << 0)
} cop_delete_flag;
/*
 * carry() implements the "lock handle tracking" feature.
 *
 * Callers supply carry with the node where the initial operation is to be
 * performed and a lock handle on this node. Trying to optimize node
 * utilization, carry may actually move the insertion point to a different
 * node. Callers expect that the lock handle will be transferred to the new
 * node as well.
 */
typedef enum {
	/* transfer lock handle along with insertion point */
	CARRY_TRACK_CHANGE = 1,
	/* acquire new lock handle to the node where insertion point is. This
	 * is used when the carry() client doesn't initially possess a lock
	 * handle on the insertion point node, for example, the extent
	 * insertion code. See carry_extent(). */
	CARRY_TRACK_NODE = 2
} carry_track_type;
/* data supplied to COP_{INSERT|PASTE} by callers */
typedef struct carry_insert_data {
	/* position where new item is to be inserted */
	coord_t *coord;
	/* new item description */
	reiser4_item_data *data;
	/* key of new item */
	const reiser4_key *key;
} carry_insert_data;
/* cut and kill are similar, so carry_cut_data and carry_kill_data share the
   below structure of parameters */
struct cut_kill_params {
	/* coord where cut starts (inclusive) */
	coord_t *from;
	/* coord where cut stops (inclusive, this item/unit will also be
	 * cut) */
	coord_t *to;
	/* starting key. This is necessary when item and unit pos don't
	 * uniquely identify what portion of the tree to remove. For example,
	 * this indicates what portion of an extent unit will be affected. */
	const reiser4_key *from_key;
	/* exclusive stop key */
	const reiser4_key *to_key;
	/* if this is not NULL, the smallest actually removed key is stored
	 * here. */
	reiser4_key *smallest_removed;
	/* kill_node_content() is called for file truncate */
	int truncate;
};
struct carry_cut_data {
	struct cut_kill_params params;
};
struct carry_kill_data {
	struct cut_kill_params params;
	/* parameter to be passed to the ->kill_hook() method of item
	 * plugin */
	/*void *iplug_params; *//* FIXME: unused currently */
	/* if not NULL, inode whose items are being removed. This is needed
	 * for ->kill_hook() of extent item to update VM structures when
	 * removing pages. */
	struct inode *inode;
	/* sibling list maintenance is complicated by the existence of eottl.
	 * When an eottl whose left and right neighbors are formatted leaves
	 * is removed, one has to connect said leaves in the sibling list.
	 * This cannot be done when extent removal is just started, as locking
	 * rules require the sibling list update to happen atomically with
	 * removal of the extent item. Therefore: 1. pointers to the left and
	 * right neighbors have to be passed down to the ->kill_hook() of
	 * extent item, and 2. said neighbors have to be locked. */
	lock_handle *left;
	lock_handle *right;
	/* flags modifying behavior of kill. Currently, it may have
	   DELETE_RETAIN_EMPTY set. */
	unsigned flags;
	char *buf;
};
/* &carry_tree_op - operation to "carry" upward.

   Description of an operation we want to "carry" to the upper level of
   a tree: e.g., when we insert something and there is not enough space
   we allocate a new node and "carry" the operation of inserting a
   pointer to the new node to the upper level; on removal of an empty node,
   we carry up the operation of removing the appropriate entry from the
   parent.

   There are two types of carry ops: when adding or deleting a node, the
   node at the parent level where the appropriate modification has to be
   performed is known in advance. When shifting items between nodes
   (split, merge), the delimiting key should be changed in the least common
   parent of the nodes involved, and that parent is not known in advance.

   For operations of the first type we store in &carry_op a pointer to
   the &carry_node at the parent level. For operations of the second
   type we store &carry_nodes for the parents of the left and right nodes
   modified and keep track of them upward until they coincide.
*/
typedef struct carry_op {
	/* pool linkage */
	struct reiser4_pool_header header;
	carry_opcode op;
	/* node on which operation is to be performed:

	   for insert, paste: node where new item is to be inserted

	   for delete: node where pointer is to be deleted

	   for cut: node to cut from

	   for update: node where delimiting key is to be modified

	   for modify: parent of modified node
	*/
	carry_node *node;
	union {
		struct {
			/* (sub-)type of insertion/paste. Taken from
			   cop_insert_pos_type. */
			__u8 type;
			/* various operation flags. Taken from
			   cop_insert_flag. */
			__u8 flags;
			carry_insert_data *d;
			carry_node *child;
			znode *brother;
		} insert, paste, extent;

		struct {
			int is_cut;
			union {
				carry_kill_data *kill;
				carry_cut_data *cut;
			} u;
		} cut_or_kill;

		struct {
			carry_node *left;
		} update;
		struct {
			/* changed child */
			carry_node *child;
			/* bitmask of changes. See &cop_modify_flag */
			__u32 flag;
		} modify;
		struct {
			/* flags to deletion operation. Are taken from
			   cop_delete_flag */
			__u32 flags;
			/* child to delete from parent. If this is
			   NULL, delete op->node. */
			carry_node *child;
		} delete;
		struct {
			/* various operation flags. Taken from
			   cop_insert_flag. */
			__u32 flags;
			flow_t *flow;
			coord_t *insert_point;
			reiser4_item_data *data;
			/* flow insertion is limited by the number of new
			   blocks added during that operation which get no
			   data other than part of the flow. This limit is
			   set by the macro CARRY_FLOW_NEW_NODES_LIMIT. This
			   field stores the number of nodes already added
			   during one carry_flow. */
			int new_nodes;
		} insert_flow;
	} u;
} carry_op;
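
/*
 * Illustrative sketch (hypothetical helper, not in the original header):
 * filling the insert arm of the union above for a COP_INSERT whose target
 * item is located through the child pointer (COPT_CHILD).
 */
#if 0
static void fill_insert_op_example(carry_op *op, carry_node *child,
				   carry_insert_data *d)
{
	op->u.insert.type = COPT_CHILD;	/* target item contains pointer
					 * to ->child */
	op->u.insert.flags = 0;		/* no cop_insert_flag bits set */
	op->u.insert.d = d;		/* coord, item data and key */
	op->u.insert.child = child;
}
#endif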
/* &carry_op_pool - preallocated pool of carry operations and nodes */
typedef struct carry_pool {
	carry_op op[CARRIES_POOL_SIZE];
	struct reiser4_pool op_pool;
	carry_node node[NODES_LOCKED_POOL_SIZE];
	struct reiser4_pool node_pool;
} carry_pool;
/* &carry_tree_level - carry process on given level

   Description of balancing process on the given level.

   No need for locking here, as carry_tree_level is essentially a per
   thread thing (for now).
*/
struct carry_level {
	/* this level may be restarted */
	__u32 restartable:1;
	/* list of carry nodes on this level, ordered by key order */
	struct list_head nodes;
	struct list_head ops;
	/* pool where new objects are allocated from */
	carry_pool *pool;
	int ops_num;
	int nodes_num;
	/* new root created on this level, if any */
	znode *new_root;
	/* This is set by callers (insert_by_key(), reiser4_resize_item(),
	   etc.) when they want ->tracked to automagically wander to the node
	   where the insertion point moved after insert or paste. */
	carry_track_type track_type;
	/* lock handle supplied by user that we are tracking. See
	   above. */
	lock_handle *tracked;
};
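
/*
 * Illustrative sketch (hypothetical helper, not in the original header):
 * how a caller arranges for lock handle tracking, as described at
 * carry_track_type above.
 */
#if 0
static void carry_level_tracking_example(carry_level *lowest, lock_handle *lh)
{
	lowest->track_type = CARRY_TRACK_CHANGE;
	lowest->tracked = lh;	/* carry() re-targets this handle if the
				 * insertion point moves to another node */
}
#endif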
/* information carry passes to plugin methods that may add new operations to
   the @todo queue */
struct carry_plugin_info {
	carry_level *doing;
	carry_level *todo;
};
int reiser4_carry(carry_level * doing, carry_level * done);

carry_node *reiser4_add_carry(carry_level * level, pool_ordering order,
			      carry_node * reference);
carry_node *reiser4_add_carry_skip(carry_level * level, pool_ordering order,
				   carry_node * reference);

extern carry_node *insert_carry_node(carry_level * doing,
				     carry_level * todo, const znode * node);

extern carry_pool *init_carry_pool(int);
extern void done_carry_pool(carry_pool * pool);

extern void init_carry_level(carry_level * level, carry_pool * pool);

extern carry_op *reiser4_post_carry(carry_level * level, carry_opcode op,
				    znode * node, int apply_to_parent);
extern carry_op *node_post_carry(carry_plugin_info * info, carry_opcode op,
				 znode * node, int apply_to_parent_p);

carry_node *add_new_znode(znode * brother, carry_node * reference,
			  carry_level * doing, carry_level * todo);

carry_node *find_carry_node(carry_level * level, const znode * node);

extern znode *reiser4_carry_real(const carry_node * node);
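
/*
 * Illustrative sketch of the intended calling sequence (hypothetical and
 * simplified; real callers carve their carry_level objects out of the same
 * allocation as the pool and check errors, see fs/reiser4/carry.c).  The
 * argument to init_carry_pool() and the error-return convention of
 * reiser4_post_carry() are assumptions here.
 */
#if 0
static int carry_usage_example(znode *node, carry_level *doing)
{
	carry_pool *pool;
	carry_op *op;
	int result = 0;

	pool = init_carry_pool(sizeof(*pool));	/* size is an assumption */
	init_carry_level(doing, pool);

	/* queue "insert new item"; 1 means "apply to the parent of @node" */
	op = reiser4_post_carry(doing, COP_INSERT, node, 1);
	if (op != NULL && !IS_ERR(op))
		result = reiser4_carry(doing, NULL /* nothing done yet */);

	done_carry_pool(pool);
	return result;
}
#endif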
/* helper macros to iterate over carry queues */

#define carry_node_next(node)						\
	list_entry((node)->header.level_linkage.next, carry_node,	\
		   header.level_linkage)

#define carry_node_prev(node)						\
	list_entry((node)->header.level_linkage.prev, carry_node,	\
		   header.level_linkage)

#define carry_node_front(level)						\
	list_entry((level)->nodes.next, carry_node, header.level_linkage)

#define carry_node_back(level)						\
	list_entry((level)->nodes.prev, carry_node, header.level_linkage)

#define carry_node_end(level, node)					\
	(&(level)->nodes == &(node)->header.level_linkage)
/* macro to iterate over all operations in a @level */
#define for_all_ops(level /* carry level (of type carry_level *) */,	\
		    op /* pointer to carry operation, modified by loop	\
			* (of type carry_op *) */,			\
		    tmp /* pointer to carry operation (of type		\
			 * carry_op *), used to make iterator stable	\
			 * in the face of deletions from the level */)	\
for (op = list_entry(level->ops.next, carry_op, header.level_linkage),	\
     tmp = list_entry(op->header.level_linkage.next, carry_op, header.level_linkage); \
     &op->header.level_linkage != &level->ops;				\
     op = tmp,								\
     tmp = list_entry(op->header.level_linkage.next, carry_op, header.level_linkage))
#if 0
for (op = (carry_op *) pool_level_list_front(&level->ops),		\
     tmp = (carry_op *) pool_level_list_next(&op->header);		\
     !pool_level_list_end(&level->ops, &op->header);			\
     op = tmp, tmp = (carry_op *) pool_level_list_next(&op->header))
#endif
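
/*
 * Illustrative sketch (hypothetical): walking every queued operation on a
 * level with for_all_ops().  @tmp keeps the iteration valid even if the
 * current op is removed from the level.
 */
#if 0
static int count_ops_example(carry_level *level)
{
	carry_op *op;
	carry_op *tmp;
	int nr = 0;

	for_all_ops(level, op, tmp)
		nr++;
	return nr;
}
#endif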
/* macro to iterate over all nodes in a @level */
#define for_all_nodes(level /* carry level (of type carry_level *) */,	\
		      node /* pointer to carry node, modified by loop	\
			    * (of type carry_node *) */,		\
		      tmp /* pointer to carry node (of type		\
			   * carry_node *), used to make iterator	\
			   * stable in the face of deletions from the	\
			   * level */)					\
for (node = list_entry(level->nodes.next, carry_node, header.level_linkage), \
     tmp = list_entry(node->header.level_linkage.next, carry_node, header.level_linkage); \
     &node->header.level_linkage != &level->nodes;			\
     node = tmp,							\
     tmp = list_entry(node->header.level_linkage.next, carry_node, header.level_linkage))
#if 0
for (node = carry_node_front(level),					\
     tmp = carry_node_next(node); !carry_node_end(level, node);		\
     node = tmp, tmp = carry_node_next(node))
#endif
/* macro to iterate over all nodes in a @level in reverse order

   This is used because nodes are unlocked in the reverse order of locking. */
#define for_all_nodes_back(level /* carry level (of type carry_level *) */, \
			   node /* pointer to carry node, modified by	\
				 * loop (of type carry_node *) */,	\
			   tmp /* pointer to carry node (of type	\
				* carry_node *), used to make iterator	\
				* stable in the face of deletions from	\
				* the level */)				\
for (node = carry_node_back(level),					\
     tmp = carry_node_prev(node); !carry_node_end(level, node);		\
     node = tmp, tmp = carry_node_prev(node))
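
/*
 * Illustrative sketch (hypothetical): visiting carry nodes in reverse, the
 * order in which a level is torn down.  Only fields declared in this header
 * are inspected; the real unlock path lives in fs/reiser4/carry.c.
 */
#if 0
static void teardown_order_example(carry_level *level)
{
	carry_node *node;
	carry_node *tmp;

	for_all_nodes_back(level, node, tmp) {
		if (node->unlock) {
			/* this is where carry would unlock node's znode */
		}
	}
}
#endif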
/* __FS_REISER4_CARRY_H__ */
#endif
/* Make Linus happy.
   Local variables:
   c-indentation-style: "K&R"
   mode-name: "LC"
   c-basic-offset: 8
   tab-width: 8
   fill-column: 120
   scroll-step: 1
   End:
*/