/*
 *  linux/net/sunrpc/sched.c
 *
 * Scheduling for synchronous and asynchronous RPC requests.
 *
 * Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de>
 *
 * TCP NFS related read + write fixes
 *  (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
 */
#include <linux/module.h>

#define __KERNEL_SYSCALLS__
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/malloc.h>
#include <linux/unistd.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>

#include <linux/sunrpc/clnt.h>

#ifdef RPC_DEBUG
#define RPCDBG_FACILITY		RPCDBG_SCHED
static int			rpc_task_id = 0;
#endif
/*
 * We give RPC the same get_free_pages priority as NFS.
 */
#define GFP_RPC			GFP_NFS

static void			__rpc_default_timer(struct rpc_task *task);
static void			rpciod_killall(void);

/*
 * When an asynchronous RPC task is activated within a bottom half
 * handler, or while executing another RPC task, it is put on
 * schedq, and rpciod is woken up.
 */
static struct rpc_wait_queue	schedq = RPC_INIT_WAITQ("schedq");

/*
 * RPC tasks that create another task (e.g. for contacting the portmapper)
 * will wait on this queue for their child's completion.
 */
static struct rpc_wait_queue	childq = RPC_INIT_WAITQ("childq");

/*
 * RPC tasks sit here while waiting for conditions to improve.
 */
static struct rpc_wait_queue	delay_queue = RPC_INIT_WAITQ("delayq");

/*
 * All RPC tasks are linked into this list.
 */
static struct rpc_task *	all_tasks = NULL;

/*
 * rpciod-related stuff
 */
static DECLARE_WAIT_QUEUE_HEAD(rpciod_idle);
static DECLARE_WAIT_QUEUE_HEAD(rpciod_killer);
static DECLARE_MUTEX(rpciod_sema);
static unsigned int		rpciod_users = 0;
static pid_t			rpciod_pid = 0;
static int			rpc_inhibit = 0;

/*
 * This is the last-ditch buffer for NFS swap requests.
 */
static u32			swap_buffer[PAGE_SIZE >> 2];
static int			swap_buffer_used = 0;
/*
 * Add new request to wait queue.
 *
 * Swapper tasks always get inserted at the head of the queue.
 * This should avoid many nasty memory deadlocks and hopefully
 * improve overall performance.
 * Everyone else gets appended to the queue to ensure proper FIFO behavior.
 */
int
rpc_add_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (task->tk_rpcwait) {
		if (task->tk_rpcwait != queue) {
			printk(KERN_WARNING "RPC: doubly enqueued task!\n");
			return -EWOULDBLOCK;
		}
		return 0;
	}
	if (RPC_IS_SWAPPER(task))
		rpc_insert_list(&queue->task, task);
	else
		rpc_append_list(&queue->task, task);
	task->tk_rpcwait = queue;

	dprintk("RPC: %4d added to queue %p \"%s\"\n",
				task->tk_pid, queue, rpc_qname(queue));

	return 0;
}
/*
 * Remove request from queue.
 * Note: must be called with interrupts disabled.
 */
void
rpc_remove_wait_queue(struct rpc_task *task)
{
	struct rpc_wait_queue	*queue;

	if (!(queue = task->tk_rpcwait))
		return;
	rpc_remove_list(&queue->task, task);
	task->tk_rpcwait = NULL;

	dprintk("RPC: %4d removed from queue %p \"%s\"\n",
				task->tk_pid, queue, rpc_qname(queue));
}
/*
 * Set up a timer for the current task.
 */
inline void
rpc_add_timer(struct rpc_task *task, rpc_action timer)
{
	unsigned long	expires = jiffies + task->tk_timeout;

	dprintk("RPC: %4d setting alarm for %lu ms\n",
			task->tk_pid, task->tk_timeout * 1000 / HZ);
	if (!timer)
		timer = __rpc_default_timer;
	if (time_before(expires, jiffies)) {
		printk(KERN_ERR "RPC: bad timeout value %ld - setting to 10 sec!\n",
				task->tk_timeout);
		expires = jiffies + 10 * HZ;
	}
	task->tk_timer.expires  = expires;
	task->tk_timer.data     = (unsigned long) task;
	task->tk_timer.function = (void (*)(unsigned long)) timer;
	task->tk_timer.prev     = NULL;
	task->tk_timer.next     = NULL;
	add_timer(&task->tk_timer);
}
/*
 * Delete any timer for the current task.
 * Must be called with interrupts off.
 */
inline void
rpc_del_timer(struct rpc_task *task)
{
	if (task->tk_timeout) {
		dprintk("RPC: %4d deleting timer\n", task->tk_pid);
		del_timer(&task->tk_timer);
		task->tk_timeout = 0;
	}
}
/*
 * Make an RPC task runnable.
 *
 * Note: If the task is ASYNC, this must be called with
 * interrupts disabled to protect the wait queue operation.
 */
static inline void
rpc_make_runnable(struct rpc_task *task)
{
	if (task->tk_timeout) {
		printk(KERN_ERR "RPC: task w/ running timer in rpc_make_runnable!!\n");
		return;
	}
	task->tk_flags |= RPC_TASK_RUNNING;
	if (RPC_IS_ASYNC(task)) {
		int status;

		status = rpc_add_wait_queue(&schedq, task);
		if (status) {
			printk(KERN_WARNING "RPC: failed to add task to queue: error: %d!\n", status);
			task->tk_status = status;
		}
		wake_up(&rpciod_idle);
	} else {
		wake_up(&task->tk_wait);
	}
}
/*
 * For other people who may need to wake the I/O daemon
 * but should (for now) know nothing about its innards.
 */
void rpciod_wake_up(void)
{
	if (rpciod_pid == 0)
		printk(KERN_ERR "rpciod: wot no daemon?\n");
	wake_up(&rpciod_idle);
}
/*
 * Prepare for sleeping on a wait queue.
 * By always appending tasks to the list we ensure FIFO behavior.
 * NB: An RPC task will only receive interrupt-driven events as long
 * as it's on a wait queue.
 */
static void
__rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
			rpc_action action, rpc_action timer)
{
	unsigned long	oldflags;
	int		status;

	dprintk("RPC: %4d sleep_on(queue \"%s\" time %ld)\n", task->tk_pid,
				rpc_qname(q), jiffies);

	/*
	 * Protect the execution below.
	 */
	save_flags(oldflags); cli();

	status = rpc_add_wait_queue(q, task);
	if (status) {
		printk(KERN_WARNING "RPC: failed to add task to queue: error: %d!\n", status);
		task->tk_status = status;
		task->tk_flags |= RPC_TASK_RUNNING;
	} else {
		task->tk_callback = action;
		if (task->tk_timeout)
			rpc_add_timer(task, timer);
		task->tk_flags &= ~RPC_TASK_RUNNING;
	}

	restore_flags(oldflags);
}
void
rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
				rpc_action action, rpc_action timer)
{
	__rpc_sleep_on(q, task, action, timer);
}
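/*
 * Illustrative sketch (not part of the original file): a typical caller
 * parks a task on a wait queue from one of its tk_action routines and
 * relies on rpc_wake_up_task() from an interrupt/bottom-half path to
 * resume it; the "action" callback runs when the task next executes.
 * The queue and function names below are hypothetical.
 *
 *	static struct rpc_wait_queue reply_queue = RPC_INIT_WAITQ("replyq");
 *
 *	static void
 *	demo_wait_for_reply(struct rpc_task *task)
 *	{
 *		task->tk_timeout = 10 * HZ;	[armed via rpc_add_timer]
 *		rpc_sleep_on(&reply_queue, task, demo_reply_received, NULL);
 *	}
 *
 *	[later, e.g. from a data_ready handler:]
 *	rpc_wake_up_task(task);
 */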
/*
 * Wake up a single task -- must be invoked with bottom halves off.
 *
 * It would probably suffice to cli/sti the del_timer and remove_wait_queue
 * operations individually.
 */
static void
__rpc_wake_up(struct rpc_task *task)
{
	dprintk("RPC: %4d __rpc_wake_up (now %ld inh %d)\n",
					task->tk_pid, jiffies, rpc_inhibit);

#ifdef RPC_DEBUG
	if (task->tk_magic != 0xf00baa) {
		printk(KERN_ERR "RPC: attempt to wake up non-existing task!\n");
		rpc_debug = ~0;
		return;
	}
#endif
	rpc_del_timer(task);
	if (task->tk_rpcwait != &schedq)
		rpc_remove_wait_queue(task);
	if (!RPC_IS_RUNNING(task)) {
		task->tk_flags |= RPC_TASK_CALLBACK;
		rpc_make_runnable(task);
	}
	dprintk("RPC: __rpc_wake_up done\n");
}
/*
 * Default timeout handler if none specified by user.
 */
static void
__rpc_default_timer(struct rpc_task *task)
{
	dprintk("RPC: %d timeout (default timer)\n", task->tk_pid);
	task->tk_status = -ETIMEDOUT;
	task->tk_timeout = 0;
	__rpc_wake_up(task);
}
/*
 * Wake up the specified task.
 */
void
rpc_wake_up_task(struct rpc_task *task)
{
	unsigned long	oldflags;

	save_flags(oldflags); cli();
	__rpc_wake_up(task);
	restore_flags(oldflags);
}
/*
 * Wake up the next task on the wait queue.
 */
struct rpc_task *
rpc_wake_up_next(struct rpc_wait_queue *queue)
{
	unsigned long	oldflags;
	struct rpc_task	*task;

	dprintk("RPC: wake_up_next(%p \"%s\")\n", queue, rpc_qname(queue));
	save_flags(oldflags); cli();
	if ((task = queue->task) != NULL)
		__rpc_wake_up(task);
	restore_flags(oldflags);

	return task;
}
/*
 * Wake up all tasks on a queue.
 */
void
rpc_wake_up(struct rpc_wait_queue *queue)
{
	unsigned long	oldflags;

	save_flags(oldflags); cli();
	while (queue->task)
		__rpc_wake_up(queue->task);
	restore_flags(oldflags);
}
/*
 * Wake up all tasks on a queue, and set their status value.
 */
void
rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
{
	struct rpc_task	*task;
	unsigned long	oldflags;

	save_flags(oldflags); cli();
	while ((task = queue->task) != NULL) {
		task->tk_status = status;
		__rpc_wake_up(task);
	}
	restore_flags(oldflags);
}
/*
 * Run a task at a later time.
 */
static void	__rpc_atrun(struct rpc_task *);

void
rpc_delay(struct rpc_task *task, unsigned long delay)
{
	task->tk_timeout = delay;
	rpc_sleep_on(&delay_queue, task, NULL, __rpc_atrun);
}

static void
__rpc_atrun(struct rpc_task *task)
{
	task->tk_status = 0;
	__rpc_wake_up(task);
}
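/*
 * Illustrative sketch (not part of the original file): rpc_delay() is how
 * a state routine backs off and retries later after a transient failure.
 * The function names below are hypothetical.
 *
 *	static void
 *	demo_handle_busy(struct rpc_task *task)
 *	{
 *		task->tk_action = demo_transmit;	[next FSM step]
 *		rpc_delay(task, 5 * HZ);		[resume in ~5 seconds]
 *	}
 */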
/*
 * This is the RPC `scheduler' (or rather, the finite state machine).
 */
static int
__rpc_execute(struct rpc_task *task)
{
	unsigned long	oldflags;
	int		status = 0;

	dprintk("RPC: %4d rpc_execute flgs %x\n",
				task->tk_pid, task->tk_flags);

	if (!RPC_IS_RUNNING(task)) {
		printk(KERN_WARNING "RPC: rpc_execute called for sleeping task!!\n");
		return 0;
	}

	while (1) {
		/*
		 * Execute any pending callback.
		 */
		if (task->tk_flags & RPC_TASK_CALLBACK) {
			/* Define a callback save pointer */
			void (*save_callback)(struct rpc_task *);

			task->tk_flags &= ~RPC_TASK_CALLBACK;

			/*
			 * If a callback exists, save it, reset it,
			 * call it.
			 * The save is needed to stop from resetting
			 * another callback set within the callback handler
			 *   - Dave
			 */
			if (task->tk_callback) {
				save_callback = task->tk_callback;
				task->tk_callback = NULL;
				save_callback(task);
			}
		}

		/*
		 * No handler for next step means exit.
		 */
		if (!task->tk_action)
			break;

		/*
		 * Perform the next FSM step.
		 * tk_action may be NULL when the task has been killed
		 * by someone else.
		 */
		if (RPC_IS_RUNNING(task) && task->tk_action)
			task->tk_action(task);

		/*
		 * Check whether task is sleeping.
		 * Note that the task may go to sleep in tk_action; if the
		 * RPC reply arrives before we get here, it will have state
		 * RUNNING, but will still be on schedq.
		 */
		save_flags(oldflags); cli();
		if (RPC_IS_RUNNING(task)) {
			if (task->tk_rpcwait == &schedq)
				rpc_remove_wait_queue(task);
		} else while (!RPC_IS_RUNNING(task)) {
			if (RPC_IS_ASYNC(task)) {
				restore_flags(oldflags);
				return 0;
			}

			/* sync task: sleep here */
			dprintk("RPC: %4d sync task going to sleep\n",
							task->tk_pid);
			if (current->pid == rpciod_pid)
				printk(KERN_ERR "RPC: rpciod waiting on sync task!\n");

			sti();
			__wait_event(task->tk_wait, RPC_IS_RUNNING(task));
			cli();

			/*
			 * When the task received a signal, remove from
			 * any queues etc, and make runnable again.
			 */
			if (signalled())
				__rpc_wake_up(task);

			dprintk("RPC: %4d sync task resuming\n",
							task->tk_pid);
		}
		restore_flags(oldflags);

		/*
		 * When a sync task receives a signal, it exits with
		 * -ERESTARTSYS. In order to catch any callbacks that
		 * clean up after sleeping on some queue, we don't
		 * break the loop here, but go around once more.
		 */
		if (!RPC_IS_ASYNC(task) && signalled()) {
			dprintk("RPC: %4d got signal\n", task->tk_pid);
			rpc_exit(task, -ERESTARTSYS);
		}
	}

	dprintk("RPC: %4d exit() = %d\n", task->tk_pid, task->tk_status);
	if (task->tk_exit) {
		status = task->tk_status;
		task->tk_exit(task);
	}

	return status;
}
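/*
 * Illustrative sketch (not part of the original file): a tk_action routine
 * performs one step of the state machine, points tk_action at the next
 * step (or leaves it NULL to terminate), and may put the task to sleep;
 * __rpc_execute() above then either loops, returns (async), or waits
 * (sync).  The names and buffer size below are hypothetical.
 *
 *	static void
 *	demo_encode(struct rpc_task *task)
 *	{
 *		task->tk_action = demo_transmit;
 *		if (!task->tk_buffer &&
 *		    !(task->tk_buffer = rpc_allocate(task->tk_flags, DEMO_BUFSIZ)))
 *			rpc_delay(task, HZ);	[retry the allocation later]
 *	}
 */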
/*
 * User-visible entry point to the scheduler.
 * The recursion protection is for debugging. It should go away once
 * the code has stabilized.
 */
void
rpc_execute(struct rpc_task *task)
{
	static int	executing = 0;
	int		incr = RPC_IS_ASYNC(task)? 1 : 0;

	if (incr) {
		if (rpc_inhibit) {
			printk(KERN_INFO "RPC: execution inhibited!\n");
			return;
		}
		if (executing)
			printk(KERN_WARNING "RPC: %d tasks executed\n", executing);
	}

	executing += incr;
	__rpc_execute(task);
	executing -= incr;
}
/*
 * This is our own little scheduler for async RPC tasks.
 */
static void
__rpc_schedule(void)
{
	struct rpc_task	*task;
	int		count = 0;
	unsigned long	oldflags;
	int		need_resched = current->need_resched;

	dprintk("RPC: rpc_schedule enter\n");
	save_flags(oldflags);
	while (1) {
		cli();
		if (!(task = schedq.task))
			break;
		rpc_del_timer(task);
		rpc_remove_wait_queue(task);
		task->tk_flags |= RPC_TASK_RUNNING;
		restore_flags(oldflags);

		__rpc_execute(task);

		if (++count >= 200) {
			count = 0;
			need_resched = 1;
		}
		if (need_resched)
			schedule();
	}
	restore_flags(oldflags);
	dprintk("RPC: rpc_schedule leave\n");
}
/*
 * Allocate memory for RPC purposes.
 *
 * This is yet another tricky issue: For sync requests issued by
 * a user process, we want to make kmalloc sleep if there isn't
 * enough memory. Async requests should not sleep too excessively
 * because that will block rpciod (but that's not dramatic when
 * it's starved of memory anyway). Finally, swapout requests should
 * never sleep at all, and should not trigger another swap_out
 * request through kmalloc which would just increase memory contention.
 *
 * I hope the following gets it right, which gives async requests
 * a slight advantage over sync requests (good for writeback, debatable
 * for readahead):
 *
 *	sync user requests:	GFP_KERNEL
 *	async requests:		GFP_RPC		(== GFP_NFS)
 *	swap requests:		GFP_ATOMIC	(or new GFP_SWAPPER)
 */
void *
rpc_allocate(unsigned int flags, unsigned int size)
{
	u32	*buffer;
	int	gfp;

	if (flags & RPC_TASK_SWAPPER)
		gfp = GFP_ATOMIC;
	else if (flags & RPC_TASK_ASYNC)
		gfp = GFP_RPC;
	else
		gfp = GFP_KERNEL;

	do {
		if ((buffer = (u32 *) kmalloc(size, gfp)) != NULL) {
			dprintk("RPC: allocated buffer %p\n", buffer);
			return buffer;
		}
		if ((flags & RPC_TASK_SWAPPER) && !swap_buffer_used++) {
			dprintk("RPC: used last-ditch swap buffer\n");
			return swap_buffer;
		}
		if (flags & RPC_TASK_ASYNC)
			return NULL;
		current->state = TASK_INTERRUPTIBLE;
		schedule_timeout(HZ>>4);
	} while (!signalled());

	return NULL;
}
void
rpc_free(void *buffer)
{
	if (buffer != swap_buffer) {
		kfree(buffer);
		return;
	}
	swap_buffer_used = 0;
}
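/*
 * Illustrative sketch (not part of the original file): callers hand their
 * task flags to rpc_allocate() so the GFP policy above is chosen per
 * request type.  The variable names below are hypothetical.
 *
 *	buffer = rpc_allocate(task->tk_flags, bufsiz);	[may sleep for sync tasks]
 *	if (!buffer)
 *		return -ENOMEM;
 *	...
 *	rpc_free(buffer);	[also releases the last-ditch swap buffer]
 */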
/*
 * Creation and deletion of RPC task structures.
 */
inline void
rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt,
				rpc_action callback, int flags)
{
	memset(task, 0, sizeof(*task));
	task->tk_client = clnt;
	task->tk_flags  = RPC_TASK_RUNNING | flags;
	task->tk_exit   = callback;
	init_waitqueue_head(&task->tk_wait);
	if (current->uid != current->fsuid || current->gid != current->fsgid)
		task->tk_flags |= RPC_TASK_SETUID;

	/* Initialize retry counters */
	task->tk_garb_retry = 2;
	task->tk_cred_retry = 2;
	task->tk_suid_retry = 1;

	/* Add to global list of all tasks */
	task->tk_next_task = all_tasks;
	task->tk_prev_task = NULL;
	if (all_tasks)
		all_tasks->tk_prev_task = task;
	all_tasks = task;

	if (clnt)
		clnt->cl_users++;

#ifdef RPC_DEBUG
	task->tk_magic = 0xf00baa;
	task->tk_pid = rpc_task_id++;
#endif
	dprintk("RPC: %4d new task procpid %d\n", task->tk_pid,
				current->pid);
}
/*
 * Create a new task for the specified client.  We have to
 * clean up after an allocation failure, as the client may
 * have specified "oneshot".
 */
struct rpc_task *
rpc_new_task(struct rpc_clnt *clnt, rpc_action callback, int flags)
{
	struct rpc_task	*task;

	task = (struct rpc_task *) rpc_allocate(flags, sizeof(*task));
	if (!task)
		goto cleanup;

	rpc_init_task(task, clnt, callback, flags);

	dprintk("RPC: %4d allocated task\n", task->tk_pid);
	task->tk_flags |= RPC_TASK_DYNAMIC;
out:
	return task;

cleanup:
	/* Check whether to release the client */
	if (clnt) {
		printk("rpc_new_task: failed, users=%d, oneshot=%d\n",
			clnt->cl_users, clnt->cl_oneshot);
		clnt->cl_users++;		/* pretend we were used ... */
		rpc_release_client(clnt);
	}
	goto out;
}
void
rpc_release_task(struct rpc_task *task)
{
	struct rpc_task	*next, *prev;

	dprintk("RPC: %4d release task\n", task->tk_pid);

	/* Remove from global task list */
	prev = task->tk_prev_task;
	next = task->tk_next_task;
	if (next)
		next->tk_prev_task = prev;
	if (prev)
		prev->tk_next_task = next;
	else
		all_tasks = next;

	/* Release resources */
	if (task->tk_rqstp)
		xprt_release(task);
	if (task->tk_cred)
		rpcauth_releasecred(task);
	if (task->tk_buffer) {
		rpc_free(task->tk_buffer);
		task->tk_buffer = NULL;
	}
	if (task->tk_client) {
		rpc_release_client(task->tk_client);
		task->tk_client = NULL;
	}

#ifdef RPC_DEBUG
	task->tk_magic = 0;
#endif

	if (task->tk_flags & RPC_TASK_DYNAMIC) {
		dprintk("RPC: %4d freeing task\n", task->tk_pid);
		task->tk_flags &= ~RPC_TASK_DYNAMIC;
		rpc_free(task);
	}
}
/*
 * Handling of RPC child tasks.
 * We can't simply call wake_up(parent) here, because the
 * parent task may already have gone away.
 */
static inline struct rpc_task *
rpc_find_parent(struct rpc_task *child)
{
	struct rpc_task	*temp, *parent;

	parent = (struct rpc_task *) child->tk_calldata;
	for (temp = childq.task; temp; temp = temp->tk_next) {
		if (temp == parent)
			return parent;
	}
	return NULL;
}
static void
rpc_child_exit(struct rpc_task *child)
{
	struct rpc_task	*parent;

	if ((parent = rpc_find_parent(child)) != NULL) {
		parent->tk_status = child->tk_status;
		rpc_wake_up_task(parent);
	}
	rpc_release_task(child);
}
/*
 * Note: rpc_new_task releases the client after a failure.
 */
struct rpc_task *
rpc_new_child(struct rpc_clnt *clnt, struct rpc_task *parent)
{
	struct rpc_task	*task;

	task = rpc_new_task(clnt, NULL, RPC_TASK_ASYNC | RPC_TASK_CHILD);
	if (!task)
		goto fail;
	task->tk_exit = rpc_child_exit;
	task->tk_calldata = parent;
	return task;

fail:
	parent->tk_status = -ENOMEM;
	return NULL;
}
void
rpc_run_child(struct rpc_task *task, struct rpc_task *child, rpc_action func)
{
	unsigned long	oldflags;

	save_flags(oldflags); cli();
	rpc_make_runnable(child);
	restore_flags(oldflags);
	/* N.B. Is it possible for the child to have already finished? */
	rpc_sleep_on(&childq, task, func, NULL);
}
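/*
 * Illustrative sketch (not part of the original file): the parent/child
 * pattern as used e.g. for portmapper lookups.  The parent creates an
 * async child, starts it with rpc_run_child() and sleeps on childq until
 * rpc_child_exit() copies back the child's status and wakes the parent.
 * The client and function names below are hypothetical.
 *
 *	static void
 *	demo_bind(struct rpc_task *task)
 *	{
 *		struct rpc_task	*child;
 *
 *		child = rpc_new_child(demo_pmap_clnt, task);
 *		if (!child)
 *			return;				[parent's tk_status is -ENOMEM]
 *		child->tk_action = demo_getport;	[child's first FSM step]
 *		rpc_run_child(task, child, demo_bind_done);
 *	}
 */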
/*
 * Kill all tasks for the given client.
 * XXX: kill their descendants as well?
 */
void
rpc_killall_tasks(struct rpc_clnt *clnt)
{
	struct rpc_task	**q, *rovr;

	dprintk("RPC: killing all tasks for client %p\n", clnt);
	/* N.B. Why bother to inhibit? Nothing blocks here ... */
	rpc_inhibit++;
	for (q = &all_tasks; (rovr = *q); q = &rovr->tk_next_task) {
		if (!clnt || rovr->tk_client == clnt) {
			rovr->tk_flags |= RPC_TASK_KILLED;
			rpc_exit(rovr, -EIO);
			rpc_wake_up_task(rovr);
		}
	}
	rpc_inhibit--;
}
static DECLARE_MUTEX_LOCKED(rpciod_running);

/*
 * This is the rpciod kernel thread.
 */
static int
rpciod(void *ptr)
{
	wait_queue_head_t *assassin = (wait_queue_head_t *) ptr;
	unsigned long	oldflags;
	int		rounds = 0;

	MOD_INC_USE_COUNT;
	lock_kernel();
	/*
	 * Let our maker know we're running ...
	 */
	rpciod_pid = current->pid;
	up(&rpciod_running);

	exit_files(current);
	exit_mm(current);

	spin_lock_irq(&current->sigmask_lock);
	siginitsetinv(&current->blocked, sigmask(SIGKILL));
	recalc_sigpending(current);
	spin_unlock_irq(&current->sigmask_lock);

	current->session = 1;
	current->pgrp = 1;
	sprintf(current->comm, "rpciod");

	dprintk("RPC: rpciod starting (pid %d)\n", rpciod_pid);
	while (rpciod_users) {
		if (signalled()) {
			rpciod_killall();
			flush_signals(current);
		}
		__rpc_schedule();

		if (++rounds >= 64) {	/* safeguard */
			schedule();
			rounds = 0;
		}
		save_flags(oldflags); cli();
		dprintk("RPC: rpciod running checking dispatch\n");
		rpciod_tcp_dispatcher();

		if (!schedq.task) {
			dprintk("RPC: rpciod back to sleep\n");
			interruptible_sleep_on(&rpciod_idle);
			dprintk("RPC: switch to rpciod\n");
			rpciod_tcp_dispatcher();
			rounds = 0;
		}
		restore_flags(oldflags);
	}

	dprintk("RPC: rpciod shutdown commences\n");
	if (all_tasks) {
		printk(KERN_ERR "rpciod: active tasks at shutdown?!\n");
		rpciod_killall();
	}

	rpciod_pid = 0;
	wake_up(assassin);

	dprintk("RPC: rpciod exiting\n");
	MOD_DEC_USE_COUNT;
	return 0;
}
static void
rpciod_killall(void)
{
	unsigned long flags;

	while (all_tasks) {
		current->sigpending = 0;
		rpc_killall_tasks(NULL);
		__rpc_schedule();
		if (all_tasks) {
			dprintk("rpciod_killall: waiting for tasks to exit\n");
			current->state = TASK_INTERRUPTIBLE;
			schedule_timeout(1);
		}
	}

	spin_lock_irqsave(&current->sigmask_lock, flags);
	recalc_sigpending(current);
	spin_unlock_irqrestore(&current->sigmask_lock, flags);
}
/*
 * Start up the rpciod process if it's not already running.
 */
int
rpciod_up(void)
{
	int error = 0;

	MOD_INC_USE_COUNT;
	down(&rpciod_sema);
	dprintk("rpciod_up: pid %d, users %d\n", rpciod_pid, rpciod_users);
	rpciod_users++;
	if (rpciod_pid)
		goto out;
	/*
	 * If there's no pid, we should be the first user.
	 */
	if (rpciod_users > 1)
		printk(KERN_WARNING "rpciod_up: no pid, %d users??\n", rpciod_users);
	/*
	 * Create the rpciod thread and wait for it to start.
	 */
	error = kernel_thread(rpciod, &rpciod_killer, 0);
	if (error < 0) {
		printk(KERN_WARNING "rpciod_up: create thread failed, error=%d\n", error);
		rpciod_users--;
		goto out;
	}
	down(&rpciod_running);
	error = 0;
out:
	up(&rpciod_sema);
	MOD_DEC_USE_COUNT;
	return error;
}
void
rpciod_down(void)
{
	unsigned long flags;

	MOD_INC_USE_COUNT;
	down(&rpciod_sema);
	dprintk("rpciod_down pid %d sema %d\n", rpciod_pid, rpciod_users);
	if (rpciod_users) {
		if (--rpciod_users)
			goto out;
	} else
		printk(KERN_WARNING "rpciod_down: pid=%d, no users??\n", rpciod_pid);

	if (!rpciod_pid) {
		dprintk("rpciod_down: Nothing to do!\n");
		goto out;
	}

	kill_proc(rpciod_pid, SIGKILL, 1);
	/*
	 * Usually rpciod will exit very quickly, so we
	 * wait briefly before checking the process id.
	 */
	current->sigpending = 0;
	current->state = TASK_INTERRUPTIBLE;
	schedule_timeout(1);
	/*
	 * Display a message if we're going to wait longer.
	 */
	while (rpciod_pid) {
		dprintk("rpciod_down: waiting for pid %d to exit\n", rpciod_pid);
		if (signalled()) {
			dprintk("rpciod_down: caught signal\n");
			break;
		}
		interruptible_sleep_on(&rpciod_killer);
	}
	spin_lock_irqsave(&current->sigmask_lock, flags);
	recalc_sigpending(current);
	spin_unlock_irqrestore(&current->sigmask_lock, flags);
out:
	up(&rpciod_sema);
	MOD_DEC_USE_COUNT;
}
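/*
 * Illustrative sketch (not part of the original file): users of the daemon
 * hold a reference for as long as they may issue async RPC, typically
 * bracketing the lifetime of an rpc_clnt:
 *
 *	if ((error = rpciod_up()) != 0)		[first user starts rpciod]
 *		return error;
 *	...	issue asynchronous RPC ...
 *	rpciod_down();				[last user shuts rpciod down]
 */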
#ifdef RPC_DEBUG
#include <linux/nfs_fs.h>
void rpc_show_tasks(void)
{
	struct rpc_task	*t = all_tasks, *next;
	struct nfs_wreq	*wreq;

	if (!t)
		return;
	printk("-pid- proc flgs status -client- -prog- --rqstp- -timeout "
		"-rpcwait -action- --exit--\n");
	for (; t; t = next) {
		next = t->tk_next_task;
		printk("%05d %04d %04x %06d %8p %6d %8p %08ld %8s %8p %8p\n",
			t->tk_pid, t->tk_proc, t->tk_flags, t->tk_status,
			t->tk_client, t->tk_client->cl_prog,
			t->tk_rqstp, t->tk_timeout,
			t->tk_rpcwait ? rpc_qname(t->tk_rpcwait) : " <NULL> ",
			t->tk_action, t->tk_exit);

		if (!(t->tk_flags & RPC_TASK_NFSWRITE))
			continue;
		/* NFS write requests */
		wreq = (struct nfs_wreq *) t->tk_calldata;
		printk("     NFS: flgs=%08x, pid=%d, pg=%p, off=(%d, %d)\n",
			wreq->wb_flags, wreq->wb_pid, wreq->wb_page,
			wreq->wb_offset, wreq->wb_bytes);
		printk("     name=%s/%s\n",
			wreq->wb_file->f_dentry->d_parent->d_name.name,
			wreq->wb_file->f_dentry->d_name.name);
	}
}
#endif