On Tue, Nov 06, 2007 at 02:33:53AM -0800, akpm@linux-foundation.org wrote:
[mmotm.git] / fs / reiser4 / entd.c
blob: 4ea64a84884e979dd14c05b273577cb873083a65
1 /* Copyright 2003, 2004 by Hans Reiser, licensing governed by
2 * reiser4/README */
4 /* Ent daemon. */
6 #include "debug.h"
7 #include "txnmgr.h"
8 #include "tree.h"
9 #include "entd.h"
10 #include "super.h"
11 #include "context.h"
12 #include "reiser4.h"
13 #include "vfs_ops.h"
14 #include "page_cache.h"
15 #include "inode.h"
17 #include <linux/sched.h> /* struct task_struct */
18 #include <linux/suspend.h>
19 #include <linux/kernel.h>
20 #include <linux/writeback.h>
21 #include <linux/time.h> /* INITIAL_JIFFIES */
22 #include <linux/backing-dev.h> /* bdi_write_congested */
23 #include <linux/wait.h>
24 #include <linux/kthread.h>
25 #include <linux/freezer.h>
/* NOTE(review): these two constants appear unused in this file —
   confirm against the rest of the tree before removing. */
#define DEF_PRIORITY 12
#define MAX_ENTD_ITERS 10

static void entd_flush(struct super_block *, struct wbq *);
static int entd(void *arg);

/*
 * Set ->comm of the ent thread so its current state ("busy"/"idle")
 * is visible from user space via ps/top.  Expects a local variable
 * named "super" to be in scope at the expansion site.
 */
#define entd_set_comm(state)					\
	snprintf(current->comm, sizeof(current->comm),		\
		 "ent:%s%s", super->s_id, (state))
40 /**
41 * reiser4_init_entd - initialize entd context and start kernel daemon
42 * @super: super block to start ent thread for
44 * Creates entd contexts, starts kernel thread and waits until it
45 * initializes.
47 int reiser4_init_entd(struct super_block *super)
49 entd_context *ctx;
51 assert("nikita-3104", super != NULL);
53 ctx = get_entd_context(super);
55 memset(ctx, 0, sizeof *ctx);
56 spin_lock_init(&ctx->guard);
57 init_waitqueue_head(&ctx->wait);
58 #if REISER4_DEBUG
59 INIT_LIST_HEAD(&ctx->flushers_list);
60 #endif
61 /* lists of writepage requests */
62 INIT_LIST_HEAD(&ctx->todo_list);
63 INIT_LIST_HEAD(&ctx->done_list);
64 /* start entd */
65 ctx->tsk = kthread_run(entd, super, "ent:%s", super->s_id);
66 if (IS_ERR(ctx->tsk))
67 return PTR_ERR(ctx->tsk);
68 return 0;
71 static void put_wbq(struct wbq *rq)
73 iput(rq->mapping->host);
74 complete(&rq->completion);
77 /* ent should be locked */
78 static struct wbq *__get_wbq(entd_context * ent)
80 struct wbq *wbq;
82 if (list_empty(&ent->todo_list))
83 return NULL;
85 ent->nr_todo_reqs--;
86 wbq = list_entry(ent->todo_list.next, struct wbq, link);
87 list_del_init(&wbq->link);
88 return wbq;
91 /* ent thread function */
92 static int entd(void *arg)
94 struct super_block *super;
95 entd_context *ent;
96 int done = 0;
98 super = arg;
99 /* do_fork() just copies task_struct into the new
100 thread. ->fs_context shouldn't be copied of course. This shouldn't
101 be a problem for the rest of the code though.
103 current->journal_info = NULL;
105 ent = get_entd_context(super);
107 while (!done) {
108 try_to_freeze();
110 spin_lock(&ent->guard);
111 while (ent->nr_todo_reqs != 0) {
112 struct wbq *rq;
114 assert("", list_empty(&ent->done_list));
116 /* take request from the queue head */
117 rq = __get_wbq(ent);
118 assert("", rq != NULL);
119 ent->cur_request = rq;
120 spin_unlock(&ent->guard);
122 entd_set_comm("!");
123 entd_flush(super, rq);
125 put_wbq(rq);
128 * wakeup all requestors and iput their inodes
130 spin_lock(&ent->guard);
131 while (!list_empty(&ent->done_list)) {
132 rq = list_entry(ent->done_list.next, struct wbq, link);
133 list_del_init(&rq->link);
134 ent->nr_done_reqs--;
135 spin_unlock(&ent->guard);
136 assert("", rq->written == 1);
137 put_wbq(rq);
138 spin_lock(&ent->guard);
141 spin_unlock(&ent->guard);
143 entd_set_comm(".");
146 DEFINE_WAIT(__wait);
148 do {
149 prepare_to_wait(&ent->wait, &__wait, TASK_INTERRUPTIBLE);
150 if (kthread_should_stop()) {
151 done = 1;
152 break;
154 if (ent->nr_todo_reqs != 0)
155 break;
156 schedule();
157 } while (0);
158 finish_wait(&ent->wait, &__wait);
161 BUG_ON(ent->nr_todo_reqs != 0);
162 return 0;
166 * reiser4_done_entd - stop entd kernel thread
167 * @super: super block to stop ent thread for
169 * It is called on umount. Sends stop signal to entd and wait until it handles
170 * it.
172 void reiser4_done_entd(struct super_block *super)
174 entd_context *ent;
176 assert("nikita-3103", super != NULL);
178 ent = get_entd_context(super);
179 assert("zam-1055", ent->tsk != NULL);
180 kthread_stop(ent->tsk);
183 /* called at the beginning of jnode_flush to register flusher thread with ent
184 * daemon */
185 void reiser4_enter_flush(struct super_block *super)
187 entd_context *ent;
189 assert("zam-1029", super != NULL);
190 ent = get_entd_context(super);
192 assert("zam-1030", ent != NULL);
194 spin_lock(&ent->guard);
195 ent->flushers++;
196 #if REISER4_DEBUG
197 list_add(&get_current_context()->flushers_link, &ent->flushers_list);
198 #endif
199 spin_unlock(&ent->guard);
202 /* called at the end of jnode_flush */
203 void reiser4_leave_flush(struct super_block *super)
205 entd_context *ent;
206 int wake_up_ent;
208 assert("zam-1027", super != NULL);
209 ent = get_entd_context(super);
211 assert("zam-1028", ent != NULL);
213 spin_lock(&ent->guard);
214 ent->flushers--;
215 wake_up_ent = (ent->flushers == 0 && ent->nr_todo_reqs != 0);
216 #if REISER4_DEBUG
217 list_del_init(&get_current_context()->flushers_link);
218 #endif
219 spin_unlock(&ent->guard);
220 if (wake_up_ent)
221 wake_up_process(ent->tsk);
224 #define ENTD_CAPTURE_APAGE_BURST SWAP_CLUSTER_MAX
226 static void entd_flush(struct super_block *super, struct wbq *rq)
228 reiser4_context ctx;
229 int tmp;
231 init_stack_context(&ctx, super);
232 ctx.entd = 1;
233 ctx.gfp_mask = GFP_NOFS;
235 rq->wbc->range_start = page_offset(rq->page);
236 rq->wbc->range_end = rq->wbc->range_start +
237 (ENTD_CAPTURE_APAGE_BURST << PAGE_CACHE_SHIFT);
238 tmp = rq->wbc->nr_to_write;
239 rq->mapping->a_ops->writepages(rq->mapping, rq->wbc);
241 if (rq->wbc->nr_to_write > 0) {
242 rq->wbc->range_start = 0;
243 rq->wbc->range_end = LLONG_MAX;
244 generic_sync_sb_inodes(rq->wbc);
246 rq->wbc->nr_to_write = ENTD_CAPTURE_APAGE_BURST;
247 reiser4_writeout(super, rq->wbc);
249 context_set_commit_async(&ctx);
250 reiser4_exit_context(&ctx);
254 * write_page_by_ent - ask entd thread to flush this page as part of slum
255 * @page: page to be written
256 * @wbc: writeback control passed to reiser4_writepage
258 * Creates a request, puts it on entd list of requests, wakeups entd if
259 * necessary, waits until entd completes with the request.
261 int write_page_by_ent(struct page *page, struct writeback_control *wbc)
263 struct super_block *sb;
264 struct inode *inode;
265 entd_context *ent;
266 struct wbq rq;
268 assert("", PageLocked(page));
269 assert("", page->mapping != NULL);
271 sb = page->mapping->host->i_sb;
272 ent = get_entd_context(sb);
273 assert("", ent && ent->done == 0);
276 * we are going to unlock page and ask ent thread to write the
277 * page. Re-dirty page before unlocking so that if ent thread fails to
278 * write it - it will remain dirty
280 set_page_dirty_notag(page);
283 * pin inode in memory, unlock page, entd_flush will iput. We can not
284 * iput here becasue we can not allow delete_inode to be called here
286 inode = igrab(page->mapping->host);
287 unlock_page(page);
288 if (inode == NULL)
289 /* inode is getting freed */
290 return 0;
292 /* init wbq */
293 INIT_LIST_HEAD(&rq.link);
294 rq.magic = WBQ_MAGIC;
295 rq.wbc = wbc;
296 rq.page = page;
297 rq.mapping = inode->i_mapping;
298 rq.node = NULL;
299 rq.written = 0;
300 init_completion(&rq.completion);
302 /* add request to entd's list of writepage requests */
303 spin_lock(&ent->guard);
304 ent->nr_todo_reqs++;
305 list_add_tail(&rq.link, &ent->todo_list);
306 if (ent->nr_todo_reqs == 1)
307 wake_up_process(ent->tsk);
309 spin_unlock(&ent->guard);
311 /* wait until entd finishes */
312 wait_for_completion(&rq.completion);
314 if (rq.written)
315 /* Eventually ENTD has written the page to disk. */
316 return 0;
317 return 0;
320 int wbq_available(void)
322 struct super_block *sb = reiser4_get_current_sb();
323 entd_context *ent = get_entd_context(sb);
324 return ent->nr_todo_reqs;
/*
 * Local variables:
 * c-indentation-style: "K&R"
 * mode-name: "LC"
 * c-basic-offset: 8
 * tab-width: 8
 * fill-column: 79
 * End:
 */