1 /* Copyright 2003, 2004 by Hans Reiser, licensing governed by
14 #include "page_cache.h"
17 #include <linux/sched.h> /* struct task_struct */
18 #include <linux/suspend.h>
19 #include <linux/kernel.h>
20 #include <linux/writeback.h>
21 #include <linux/time.h> /* INITIAL_JIFFIES */
22 #include <linux/backing-dev.h> /* bdi_write_congested */
23 #include <linux/wait.h>
24 #include <linux/kthread.h>
25 #include <linux/freezer.h>
27 #define DEF_PRIORITY 12
28 #define MAX_ENTD_ITERS 10
30 static void entd_flush(struct super_block
*, struct wbq
*);
31 static int entd(void *arg
);
/*
 * set ->comm field of end thread to make its state visible to the user level
 *
 * Relies on a local variable named "super" being in scope at every
 * expansion site; the state string is appended after the device id.
 */
#define entd_set_comm(state)						\
	snprintf(current->comm, sizeof(current->comm),			\
		 "ent:%s%s", super->s_id, (state))
41 * reiser4_init_entd - initialize entd context and start kernel daemon
42 * @super: super block to start ent thread for
44 * Creates entd contexts, starts kernel thread and waits until it
47 int reiser4_init_entd(struct super_block
*super
)
51 assert("nikita-3104", super
!= NULL
);
53 ctx
= get_entd_context(super
);
55 memset(ctx
, 0, sizeof *ctx
);
56 spin_lock_init(&ctx
->guard
);
57 init_waitqueue_head(&ctx
->wait
);
59 INIT_LIST_HEAD(&ctx
->flushers_list
);
61 /* lists of writepage requests */
62 INIT_LIST_HEAD(&ctx
->todo_list
);
63 INIT_LIST_HEAD(&ctx
->done_list
);
65 ctx
->tsk
= kthread_run(entd
, super
, "ent:%s", super
->s_id
);
67 return PTR_ERR(ctx
->tsk
);
71 static void put_wbq(struct wbq
*rq
)
73 iput(rq
->mapping
->host
);
74 complete(&rq
->completion
);
77 /* ent should be locked */
78 static struct wbq
*__get_wbq(entd_context
* ent
)
82 if (list_empty(&ent
->todo_list
))
86 wbq
= list_entry(ent
->todo_list
.next
, struct wbq
, link
);
87 list_del_init(&wbq
->link
);
91 /* ent thread function */
92 static int entd(void *arg
)
94 struct super_block
*super
;
99 /* do_fork() just copies task_struct into the new
100 thread. ->fs_context shouldn't be copied of course. This shouldn't
101 be a problem for the rest of the code though.
103 current
->journal_info
= NULL
;
105 ent
= get_entd_context(super
);
110 spin_lock(&ent
->guard
);
111 while (ent
->nr_todo_reqs
!= 0) {
114 assert("", list_empty(&ent
->done_list
));
116 /* take request from the queue head */
118 assert("", rq
!= NULL
);
119 ent
->cur_request
= rq
;
120 spin_unlock(&ent
->guard
);
123 entd_flush(super
, rq
);
128 * wakeup all requestors and iput their inodes
130 spin_lock(&ent
->guard
);
131 while (!list_empty(&ent
->done_list
)) {
132 rq
= list_entry(ent
->done_list
.next
, struct wbq
, link
);
133 list_del_init(&rq
->link
);
135 spin_unlock(&ent
->guard
);
136 assert("", rq
->written
== 1);
138 spin_lock(&ent
->guard
);
141 spin_unlock(&ent
->guard
);
149 prepare_to_wait(&ent
->wait
, &__wait
, TASK_INTERRUPTIBLE
);
150 if (kthread_should_stop()) {
154 if (ent
->nr_todo_reqs
!= 0)
158 finish_wait(&ent
->wait
, &__wait
);
161 BUG_ON(ent
->nr_todo_reqs
!= 0);
166 * reiser4_done_entd - stop entd kernel thread
167 * @super: super block to stop ent thread for
169 * It is called on umount. Sends stop signal to entd and wait until it handles
172 void reiser4_done_entd(struct super_block
*super
)
176 assert("nikita-3103", super
!= NULL
);
178 ent
= get_entd_context(super
);
179 assert("zam-1055", ent
->tsk
!= NULL
);
180 kthread_stop(ent
->tsk
);
183 /* called at the beginning of jnode_flush to register flusher thread with ent
185 void reiser4_enter_flush(struct super_block
*super
)
189 assert("zam-1029", super
!= NULL
);
190 ent
= get_entd_context(super
);
192 assert("zam-1030", ent
!= NULL
);
194 spin_lock(&ent
->guard
);
197 list_add(&get_current_context()->flushers_link
, &ent
->flushers_list
);
199 spin_unlock(&ent
->guard
);
202 /* called at the end of jnode_flush */
203 void reiser4_leave_flush(struct super_block
*super
)
208 assert("zam-1027", super
!= NULL
);
209 ent
= get_entd_context(super
);
211 assert("zam-1028", ent
!= NULL
);
213 spin_lock(&ent
->guard
);
215 wake_up_ent
= (ent
->flushers
== 0 && ent
->nr_todo_reqs
!= 0);
217 list_del_init(&get_current_context()->flushers_link
);
219 spin_unlock(&ent
->guard
);
221 wake_up_process(ent
->tsk
);
224 #define ENTD_CAPTURE_APAGE_BURST SWAP_CLUSTER_MAX
226 static void entd_flush(struct super_block
*super
, struct wbq
*rq
)
231 init_stack_context(&ctx
, super
);
233 ctx
.gfp_mask
= GFP_NOFS
;
235 rq
->wbc
->range_start
= page_offset(rq
->page
);
236 rq
->wbc
->range_end
= rq
->wbc
->range_start
+
237 (ENTD_CAPTURE_APAGE_BURST
<< PAGE_CACHE_SHIFT
);
238 tmp
= rq
->wbc
->nr_to_write
;
239 rq
->mapping
->a_ops
->writepages(rq
->mapping
, rq
->wbc
);
241 if (rq
->wbc
->nr_to_write
> 0) {
242 rq
->wbc
->range_start
= 0;
243 rq
->wbc
->range_end
= LLONG_MAX
;
244 generic_sync_sb_inodes(rq
->wbc
);
246 rq
->wbc
->nr_to_write
= ENTD_CAPTURE_APAGE_BURST
;
247 reiser4_writeout(super
, rq
->wbc
);
249 context_set_commit_async(&ctx
);
250 reiser4_exit_context(&ctx
);
254 * write_page_by_ent - ask entd thread to flush this page as part of slum
255 * @page: page to be written
256 * @wbc: writeback control passed to reiser4_writepage
258 * Creates a request, puts it on entd list of requests, wakeups entd if
259 * necessary, waits until entd completes with the request.
261 int write_page_by_ent(struct page
*page
, struct writeback_control
*wbc
)
263 struct super_block
*sb
;
268 assert("", PageLocked(page
));
269 assert("", page
->mapping
!= NULL
);
271 sb
= page
->mapping
->host
->i_sb
;
272 ent
= get_entd_context(sb
);
273 assert("", ent
&& ent
->done
== 0);
276 * we are going to unlock page and ask ent thread to write the
277 * page. Re-dirty page before unlocking so that if ent thread fails to
278 * write it - it will remain dirty
280 set_page_dirty_notag(page
);
283 * pin inode in memory, unlock page, entd_flush will iput. We can not
284 * iput here becasue we can not allow delete_inode to be called here
286 inode
= igrab(page
->mapping
->host
);
289 /* inode is getting freed */
293 INIT_LIST_HEAD(&rq
.link
);
294 rq
.magic
= WBQ_MAGIC
;
297 rq
.mapping
= inode
->i_mapping
;
300 init_completion(&rq
.completion
);
302 /* add request to entd's list of writepage requests */
303 spin_lock(&ent
->guard
);
305 list_add_tail(&rq
.link
, &ent
->todo_list
);
306 if (ent
->nr_todo_reqs
== 1)
307 wake_up_process(ent
->tsk
);
309 spin_unlock(&ent
->guard
);
311 /* wait until entd finishes */
312 wait_for_completion(&rq
.completion
);
315 /* Eventually ENTD has written the page to disk. */
320 int wbq_available(void)
322 struct super_block
*sb
= reiser4_get_current_sb();
323 entd_context
*ent
= get_entd_context(sb
);
324 return ent
->nr_todo_reqs
;
329 * c-indentation-style: "K&R"