/* Copyright 2001, 2002, 2003, 2004 by Hans Reiser, licensing governed by
 * reiser4/README */

/* Reiser4 context. See context.c for details. */
#if !defined( __REISER4_CONTEXT_H__ )
#define __REISER4_CONTEXT_H__

#include <linux/types.h>	/* for __u?? */
#include <linux/fs.h>		/* for struct super_block */
#include <linux/spinlock.h>
#include <linux/sched.h>	/* for struct task_struct */
/* reiser4 per-thread context */
struct reiser4_context {
	/* magic constant. For identification of reiser4 contexts. */
	__u32 magic;
	/* current lock stack. See lock.[ch]. This is where the list of all
	   locks taken by the current thread is kept. This is also used in
	   deadlock detection. */
	lock_stack stack;

	/* current transcrash. */
	txn_handle *trans;
	/* transaction handle embedded into reiser4_context. ->trans points
	 * here by default. */
	txn_handle trans_in_ctx;
	/* super block we are working with. To get the current tree
	   use &get_super_private(reiser4_get_current_sb())->tree. */
	struct super_block *super;

	/* parent fs activation */
	struct fs_activation *outer;

	/* per-thread grabbed (for further allocation) blocks counter */
	reiser4_block_nr grabbed_blocks;

	/* list of taps currently monitored. See tap.c */
	struct list_head taps;

	/* grabbing space is enabled */
	unsigned int grab_enabled:1;
	/* should be set when we write dirty nodes to disk in jnode_flush or
	 * reiser4_write_logs() */
	unsigned int writeout_mode:1;
	/* true, if current thread is an ent thread */
	unsigned int entd:1;
	/* true, if balance_dirty_pages() should not be run when leaving this
	 * context. This is used to avoid a lengthy balance_dirty_pages()
	 * operation when holding some important resource, like directory
	 * ->i_mutex */
	unsigned int nobalance:1;
	/* this bit is used on reiser4_done_context to decide whether context is
	   kmalloc-ed and has to be kfree-ed */
	unsigned int on_stack:1;

	/* count non-trivial jnode_set_dirty() calls */
	unsigned long nr_marked_dirty;
	/* reiser4_sync_inodes calls (via generic_sync_sb_inodes)
	 * reiser4_writepages for each of dirty inodes. reiser4_writepages
	 * captures pages. When the number of pages captured in one
	 * reiser4_sync_inodes reaches some threshold, some atoms get
	 * flushed and committed. */
	unsigned long nr_captured;

	int nr_children;	/* number of child contexts */
	/* debugging information about reiser4 locks held by the current
	 * thread */
	reiser4_lock_cnt_info locks;
	struct task_struct *task;	/* so we can easily find owner of the stack */
	/*
	 * disk space grabbing debugging support
	 */
	/* how many disk blocks were grabbed by the first call to
	 * reiser4_grab_space() in this context */
	reiser4_block_nr grabbed_initially;

	/* list of all threads doing flush currently */
	struct list_head flushers_link;
	/* information about last error encountered by reiser4 */
	err_site err;

	/* GFP mask used for memory allocations done within this context;
	 * read via reiser4_ctx_gfp_mask_get() below */
	gfp_t gfp_mask;
};
extern reiser4_context *get_context_by_lock_stack(lock_stack *);

/* Debugging helpers. */
extern void print_contexts(void);
#define current_tree (&(get_super_private(reiser4_get_current_sb())->tree))
#define current_blocksize reiser4_get_current_sb()->s_blocksize
#define current_blocksize_bits reiser4_get_current_sb()->s_blocksize_bits
extern reiser4_context *reiser4_init_context(struct super_block *);
extern void init_stack_context(reiser4_context *, struct super_block *);
extern void reiser4_exit_context(reiser4_context *);
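
/*
 * Illustrative sketch (not part of the original interface): a typical entry
 * point brackets its work between reiser4_init_context() and
 * reiser4_exit_context(), so that a per-thread reiser4 context exists for
 * the duration of the operation. The entry point and the helper called in
 * the middle are hypothetical, and error reporting through ERR_PTR() is an
 * assumption.
 *
 *	static int reiser4_example_entry_point(struct super_block *sb)
 *	{
 *		reiser4_context *ctx;
 *		int result;
 *
 *		ctx = reiser4_init_context(sb);
 *		if (IS_ERR(ctx))
 *			return PTR_ERR(ctx);
 *		result = do_example_work(ctx);
 *		reiser4_exit_context(ctx);
 *		return result;
 *	}
 */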
/* magic constant we store in reiser4_context allocated at the stack. Used to
   catch accesses to stale or uninitialized contexts. */
#define context_magic ((__u32) 0x4b1b5d0b)

extern int is_in_reiser4_context(void);
/*
 * return reiser4_context for the thread @tsk
 */
static inline reiser4_context *get_context(const struct task_struct *tsk)
{
	assert("vs-1682",
	       ((reiser4_context *) tsk->journal_info)->magic == context_magic);
	return (reiser4_context *) tsk->journal_info;
}
/*
 * return reiser4 context of the current thread, or NULL if there is none.
 */
static inline reiser4_context *get_current_context_check(void)
{
	if (is_in_reiser4_context())
		return get_context(current);
	else
		return NULL;
}
static inline reiser4_context *get_current_context(void);	/* __attribute__((const)); */

/* return context associated with current thread */
static inline reiser4_context *get_current_context(void)
{
	return get_context(current);
}
static inline gfp_t reiser4_ctx_gfp_mask_get(void)
{
	reiser4_context *ctx;

	ctx = get_current_context_check();
	return (ctx == NULL) ? GFP_KERNEL : ctx->gfp_mask;
}
void reiser4_ctx_gfp_mask_set(void);
void reiser4_ctx_gfp_mask_force(gfp_t mask);
/*
 * true if current thread is in the write-out mode. Thread enters write-out
 * mode during jnode_flush and reiser4_write_logs().
 */
static inline int is_writeout_mode(void)
{
	return get_current_context()->writeout_mode;
}
/*
 * enter write-out mode
 */
static inline void writeout_mode_enable(void)
{
	assert("zam-941", !get_current_context()->writeout_mode);

	get_current_context()->writeout_mode = 1;
}
/*
 * leave write-out mode
 */
static inline void writeout_mode_disable(void)
{
	assert("zam-942", get_current_context()->writeout_mode);

	get_current_context()->writeout_mode = 0;
}
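
/*
 * Illustrative sketch (not part of the original header): code that writes
 * dirty nodes to disk brackets the write-out phase with the two helpers
 * above, so that is_writeout_mode() reports the correct state while I/O is
 * in progress. The function name is hypothetical.
 *
 *	static void write_dirty_nodes_example(void)
 *	{
 *		writeout_mode_enable();
 *		... submit dirty jnodes to disk ...
 *		writeout_mode_disable();
 *	}
 */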
static inline void grab_space_enable(void)
{
	get_current_context()->grab_enabled = 1;
}

static inline void grab_space_disable(void)
{
	get_current_context()->grab_enabled = 0;
}

static inline void grab_space_set_enabled(int enabled)
{
	get_current_context()->grab_enabled = enabled;
}

static inline int is_grab_enabled(reiser4_context * ctx)
{
	return ctx->grab_enabled;
}
/* mark transaction handle in @ctx as TXNH_DONT_COMMIT, so that no commit or
 * flush would be performed when it is closed. This is necessary when the
 * handle has to be closed under some coarse semaphore, like the i_mutex of a
 * directory. Commit will be performed by ktxnmgrd. */
static inline void context_set_commit_async(reiser4_context * context)
{
	context->nobalance = 1;
	context->trans->flags |= TXNH_DONT_COMMIT;
}
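
/*
 * Illustrative sketch (not part of the original header): when a context has
 * to be closed while a coarse lock such as a directory's i_mutex is held,
 * the caller marks it for asynchronous commit first; ktxnmgrd will commit
 * the transaction later.
 *
 *	context_set_commit_async(ctx);
 *	reiser4_exit_context(ctx);	(closes the handle without committing)
 */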
#endif				/* __REISER4_CONTEXT_H__ */
/*
 * Local variables:
 * c-indentation-style: "K&R"
 * End:
 */