/*
 *  linux/net/sunrpc/sched.c
 *
 * Scheduling for synchronous and asynchronous RPC requests.
 *
 * Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de>
 *
 * TCP NFS related read + write fixes
 * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
 */
#include <linux/module.h>

#define __KERNEL_SYSCALLS__
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/malloc.h>
#include <linux/unistd.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>

#include <linux/sunrpc/clnt.h>
#define RPCDBG_FACILITY		RPCDBG_SCHED

static int			rpc_task_id = 0;
/*
 * We give RPC the same get_free_pages priority as NFS.
 */
#define GFP_RPC			GFP_NFS
static void			__rpc_default_timer(struct rpc_task *task);
static void			rpciod_killall(void);
/*
 * When an asynchronous RPC task is activated within a bottom half
 * handler, or while executing another RPC task, it is put on
 * schedq, and rpciod is woken up.
 */
static struct rpc_wait_queue	schedq = RPC_INIT_WAITQ("schedq");
/*
 * RPC tasks that create another task (e.g. for contacting the portmapper)
 * will wait on this queue for their child's completion.
 */
static struct rpc_wait_queue	childq = RPC_INIT_WAITQ("childq");
/*
 * RPC tasks sit here while waiting for conditions to improve.
 */
static struct rpc_wait_queue	delay_queue = RPC_INIT_WAITQ("delayq");
/*
 * All RPC tasks are linked into this list.
 */
static struct rpc_task *	all_tasks = NULL;
/*
 * rpciod-related stuff
 */
static DECLARE_WAIT_QUEUE_HEAD(rpciod_idle);
static DECLARE_WAIT_QUEUE_HEAD(rpciod_killer);
static DECLARE_MUTEX(rpciod_sema);
static unsigned int		rpciod_users = 0;
static pid_t			rpciod_pid = 0;
static int			rpc_inhibit = 0;
/*
 * This is the last-ditch buffer for NFS swap requests.
 */
static u32			swap_buffer[PAGE_SIZE >> 2];
static int			swap_buffer_used = 0;
/*
 * Add new request to wait queue.
 *
 * Swapper tasks always get inserted at the head of the queue.
 * This should avoid many nasty memory deadlocks and hopefully
 * improve overall performance.
 * Everyone else gets appended to the queue to ensure proper FIFO behavior.
 */
int
rpc_add_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (task->tk_rpcwait) {
		if (task->tk_rpcwait != queue) {
			printk(KERN_WARNING "RPC: doubly enqueued task!\n");
			return -EWOULDBLOCK;
		}
		return 0;
	}
	if (RPC_IS_SWAPPER(task))
		rpc_insert_list(&queue->task, task);
	else
		rpc_append_list(&queue->task, task);
	task->tk_rpcwait = queue;

	dprintk("RPC: %4d added to queue %p \"%s\"\n",
				task->tk_pid, queue, rpc_qname(queue));

	return 0;
}
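/*
 * Worked example of the ordering above (illustrative only): if ordinary
 * tasks A and B and a swapper task S are queued in the order A, S, B,
 * the resulting list is S, A, B -- swap-out requests jump to the head,
 * everyone else keeps strict FIFO order.
 */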
/*
 * Remove request from queue.
 * Note: must be called with interrupts disabled.
 */
void
rpc_remove_wait_queue(struct rpc_task *task)
{
	struct rpc_wait_queue	*queue;

	if (!(queue = task->tk_rpcwait))
		return;
	rpc_remove_list(&queue->task, task);
	task->tk_rpcwait = NULL;

	dprintk("RPC: %4d removed from queue %p \"%s\"\n",
				task->tk_pid, queue, rpc_qname(queue));
}
/*
 * Set up a timer for the current task.
 */
static inline void
rpc_add_timer(struct rpc_task *task, rpc_action timer)
{
	unsigned long	expires = jiffies + task->tk_timeout;

	dprintk("RPC: %4d setting alarm for %lu ms\n",
			task->tk_pid, task->tk_timeout * 1000 / HZ);
	if (!timer)
		timer = __rpc_default_timer;
	if (time_before(expires, jiffies)) {
		printk(KERN_ERR "RPC: bad timeout value %ld - setting to 10 sec!\n",
					task->tk_timeout);
		expires = jiffies + 10 * HZ;
	}
	task->tk_timer.expires  = expires;
	task->tk_timer.data     = (unsigned long) task;
	task->tk_timer.function = (void (*)(unsigned long)) timer;
	task->tk_timer.prev     = NULL;
	task->tk_timer.next     = NULL;
	add_timer(&task->tk_timer);
}
/*
 * Delete any timer for the current task.
 * Must be called with interrupts off.
 */
static inline void
rpc_del_timer(struct rpc_task *task)
{
	if (task->tk_timeout) {
		dprintk("RPC: %4d deleting timer\n", task->tk_pid);
		del_timer(&task->tk_timer);
		task->tk_timeout = 0;
	}
}
/*
 * Make an RPC task runnable.
 *
 * Note: If the task is ASYNC, this must be called with
 * interrupts disabled to protect the wait queue operation.
 */
static inline void
rpc_make_runnable(struct rpc_task *task)
{
	if (task->tk_timeout) {
		printk(KERN_ERR "RPC: task w/ running timer in rpc_make_runnable!!\n");
		return;
	}
	task->tk_flags |= RPC_TASK_RUNNING;
	if (RPC_IS_ASYNC(task)) {
		int status;

		status = rpc_add_wait_queue(&schedq, task);
		if (status < 0) {
			printk(KERN_WARNING "RPC: failed to add task to queue: error: %d!\n", status);
			task->tk_status = status;
			return;
		}
		wake_up(&rpciod_idle);
	} else {
		wake_up(&task->tk_wait);
	}
}
/*
 * For other people who may need to wake the I/O daemon
 * but should (for now) know nothing about its innards.
 */
void rpciod_wake_up(void)
{
	if (rpciod_pid == 0)
		printk(KERN_ERR "rpciod: wot no daemon?\n");
	wake_up(&rpciod_idle);
}
/*
 * Prepare for sleeping on a wait queue.
 * By always appending tasks to the list we ensure FIFO behavior.
 * NB: An RPC task will only receive interrupt-driven events as long
 * as it's on a wait queue.
 */
static void
__rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
			rpc_action action, rpc_action timer)
{
	unsigned long	oldflags;
	int		status;

	dprintk("RPC: %4d sleep_on(queue \"%s\" time %ld)\n", task->tk_pid,
				rpc_qname(q), jiffies);

	/*
	 * Protect the execution below.
	 */
	save_flags(oldflags); cli();

	status = rpc_add_wait_queue(q, task);
	if (status) {
		printk(KERN_WARNING "RPC: failed to add task to queue: error: %d!\n", status);
		task->tk_status = status;
		task->tk_flags |= RPC_TASK_RUNNING;
	} else {
		task->tk_callback = action;
		if (task->tk_timeout)
			rpc_add_timer(task, timer);
		task->tk_flags &= ~RPC_TASK_RUNNING;
	}

	restore_flags(oldflags);
}
void
rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
				rpc_action action, rpc_action timer)
{
	__rpc_sleep_on(q, task, action, timer);
}
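/*
 * Typical caller pattern, sketched for illustration (the names
 * "pending_queue", "next_step" and "my_timeout_handler" are assumptions
 * made up here, not identifiers from this file): a state-machine step
 * parks the task with an optional timer action, and interrupt context
 * later calls rpc_wake_up_task() when the reply arrives.
 *
 *	task->tk_timeout = req_timeout;
 *	rpc_sleep_on(&pending_queue, task, next_step, my_timeout_handler);
 *	...
 *	rpc_wake_up_task(task);		(e.g. from the transport bottom half)
 */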
/*
 * Wake up a single task -- must be invoked with bottom halves off.
 *
 * It would probably suffice to cli/sti the del_timer and remove_wait_queue
 * operations individually.
 */
static void
__rpc_wake_up(struct rpc_task *task)
{
	dprintk("RPC: %4d __rpc_wake_up (now %ld inh %d)\n",
					task->tk_pid, jiffies, rpc_inhibit);

	if (task->tk_magic != 0xf00baa) {
		printk(KERN_ERR "RPC: attempt to wake up non-existing task!\n");
		return;
	}
	rpc_del_timer(task);
	if (task->tk_rpcwait != &schedq)
		rpc_remove_wait_queue(task);
	if (!RPC_IS_RUNNING(task)) {
		task->tk_flags |= RPC_TASK_CALLBACK;
		rpc_make_runnable(task);
	}
	dprintk("RPC: __rpc_wake_up done\n");
}
/*
 * Default timeout handler if none specified by user.
 */
static void
__rpc_default_timer(struct rpc_task *task)
{
	dprintk("RPC: %d timeout (default timer)\n", task->tk_pid);
	task->tk_status = -ETIMEDOUT;
	task->tk_timeout = 0;
	rpc_wake_up_task(task);
}
/*
 * Wake up the specified task.
 */
void
rpc_wake_up_task(struct rpc_task *task)
{
	unsigned long	oldflags;

	save_flags(oldflags); cli();
	__rpc_wake_up(task);
	restore_flags(oldflags);
}
/*
 * Wake up the next task on the wait queue.
 */
struct rpc_task *
rpc_wake_up_next(struct rpc_wait_queue *queue)
{
	unsigned long	oldflags;
	struct rpc_task	*task;

	dprintk("RPC: wake_up_next(%p \"%s\")\n", queue, rpc_qname(queue));
	save_flags(oldflags); cli();
	if ((task = queue->task) != 0)
		__rpc_wake_up(task);
	restore_flags(oldflags);

	return task;
}
/*
 * Wake up all tasks on a queue.
 */
void
rpc_wake_up(struct rpc_wait_queue *queue)
{
	unsigned long	oldflags;

	save_flags(oldflags); cli();
	while (queue->task)
		__rpc_wake_up(queue->task);
	restore_flags(oldflags);
}
/*
 * Wake up all tasks on a queue, and set their status value.
 */
void
rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
{
	struct rpc_task	*task;
	unsigned long	oldflags;

	save_flags(oldflags); cli();
	while ((task = queue->task) != NULL) {
		task->tk_status = status;
		__rpc_wake_up(task);
	}
	restore_flags(oldflags);
}
/*
 * Run a task at a later time.
 */
static void	__rpc_atrun(struct rpc_task *);
void
rpc_delay(struct rpc_task *task, unsigned long delay)
{
	task->tk_timeout = delay;
	rpc_sleep_on(&delay_queue, task, NULL, __rpc_atrun);
}

static void
__rpc_atrun(struct rpc_task *task)
{
	task->tk_status = 0;
	rpc_wake_up_task(task);
}
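/*
 * Rough usage sketch (illustrative only; "retry_step" is an assumed
 * caller-defined FSM step, not something defined here): a task that hit
 * a transient error can back off and retry by pairing rpc_delay() with
 * its next action.
 *
 *	task->tk_action = retry_step;
 *	rpc_delay(task, HZ);		wait roughly one second, then resume
 */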
/*
 * This is the RPC `scheduler' (or rather, the finite state machine).
 */
static int
__rpc_execute(struct rpc_task *task)
{
	unsigned long	oldflags;
	int		status = 0;

	dprintk("RPC: %4d rpc_execute flgs %x\n",
				task->tk_pid, task->tk_flags);

	if (!RPC_IS_RUNNING(task)) {
		printk(KERN_WARNING "RPC: rpc_execute called for sleeping task!!\n");
		return 0;
	}

	while (1) {
		/*
		 * Execute any pending callback.
		 */
		if (task->tk_flags & RPC_TASK_CALLBACK) {
			/* Define a callback save pointer */
			void (*save_callback)(struct rpc_task *);

			task->tk_flags &= ~RPC_TASK_CALLBACK;
			/*
			 * If a callback exists, save it, reset it, call it.
			 * The save is needed to stop from resetting
			 * another callback set within the callback handler.
			 */
			if (task->tk_callback) {
				save_callback = task->tk_callback;
				task->tk_callback = NULL;
				save_callback(task);
			}
		}

		/*
		 * No handler for next step means exit.
		 */
		if (!task->tk_action)
			break;

		/*
		 * Perform the next FSM step.
		 * tk_action may be NULL when the task has been killed
		 * by someone else.
		 */
		if (RPC_IS_RUNNING(task) && task->tk_action)
			task->tk_action(task);

		/*
		 * Check whether task is sleeping.
		 * Note that if the task may go to sleep in tk_action,
		 * and the RPC reply arrives before we get here, it will
		 * have state RUNNING, but will still be on schedq.
		 */
		save_flags(oldflags); cli();
		if (RPC_IS_RUNNING(task)) {
			if (task->tk_rpcwait == &schedq)
				rpc_remove_wait_queue(task);
		} else while (!RPC_IS_RUNNING(task)) {
			if (RPC_IS_ASYNC(task)) {
				restore_flags(oldflags);
				return 0;
			}

			/* sync task: sleep here */
			dprintk("RPC: %4d sync task going to sleep\n",
							task->tk_pid);
			if (current->pid == rpciod_pid)
				printk(KERN_ERR "RPC: rpciod waiting on sync task!\n");

			__wait_event(task->tk_wait, RPC_IS_RUNNING(task));

			/*
			 * When the task received a signal, remove from
			 * any queues etc, and make runnable again.
			 */
			if (signalled())
				__rpc_wake_up(task);

			dprintk("RPC: %4d sync task resuming\n",
							task->tk_pid);
		}
		restore_flags(oldflags);

		/*
		 * When a sync task receives a signal, it exits with
		 * -ERESTARTSYS. In order to catch any callbacks that
		 * clean up after sleeping on some queue, we don't
		 * break the loop here, but go around once more.
		 */
		if (!RPC_IS_ASYNC(task) && signalled()) {
			dprintk("RPC: %4d got signal\n", task->tk_pid);
			rpc_exit(task, -ERESTARTSYS);
		}
	}

	dprintk("RPC: %4d exit() = %d\n", task->tk_pid, task->tk_status);
	status = task->tk_status;
	if (task->tk_exit)
		task->tk_exit(task);

	return status;
}
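/*
 * Sketch of how the FSM steps chain together (the step and queue names
 * below are illustrative assumptions, not functions defined in this
 * file): each tk_action either installs the next step and returns, or
 * puts the task to sleep on a wait queue and lets a later wakeup resume
 * the loop above.
 *
 *	static void example_step_one(struct rpc_task *task)
 *	{
 *		task->tk_action = example_step_two;
 *		rpc_sleep_on(&example_queue, task, NULL, NULL);
 *	}
 */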
/*
 * User-visible entry point to the scheduler.
 * The recursion protection is for debugging. It should go away once
 * the code has stabilized.
 */
int
rpc_execute(struct rpc_task *task)
{
	static int	executing = 0;
	int		incr = RPC_IS_ASYNC(task)? 1 : 0;
	int		status;

	if (incr) {
		if (rpc_inhibit) {
			printk(KERN_INFO "RPC: execution inhibited!\n");
			return -EIO;
		}
		if (executing)
			printk(KERN_WARNING "RPC: %d tasks executed\n", executing);
	}

	executing += incr;
	status = __rpc_execute(task);
	executing -= incr;

	return status;
}
/*
 * This is our own little scheduler for async RPC tasks.
 */
static void
__rpc_schedule(void)
{
	struct rpc_task	*task;
	int		count = 0;
	unsigned long	oldflags;
	int		need_resched = current->need_resched;

	dprintk("RPC: rpc_schedule enter\n");
	save_flags(oldflags);
	while (1) {
		cli();
		if (!(task = schedq.task))
			break;
		rpc_del_timer(task);
		rpc_remove_wait_queue(task);
		task->tk_flags |= RPC_TASK_RUNNING;
		restore_flags(oldflags);

		__rpc_execute(task);

		if (++count >= 200) {
			count = 0;
			need_resched = 1;
		}
		if (need_resched)
			schedule();
	}
	restore_flags(oldflags);
	dprintk("RPC: rpc_schedule leave\n");
}
/*
 * Allocate memory for RPC purposes.
 *
 * This is yet another tricky issue: For sync requests issued by
 * a user process, we want to make kmalloc sleep if there isn't
 * enough memory. Async requests should not sleep too excessively
 * because that will block rpciod (but that's not dramatic when
 * it's starved of memory anyway). Finally, swapout requests should
 * never sleep at all, and should not trigger another swap_out
 * request through kmalloc which would just increase memory contention.
 *
 * I hope the following gets it right, which gives async requests
 * a slight advantage over sync requests (good for writeback, debatable
 * for readahead):
 *
 * sync user requests:	GFP_KERNEL
 * async requests:	GFP_RPC		(== GFP_NFS)
 * swap requests:	GFP_ATOMIC	(or new GFP_SWAPPER)
 */
void *
rpc_allocate(unsigned int flags, unsigned int size)
{
	u32	*buffer;
	int	gfp;

	if (flags & RPC_TASK_SWAPPER)
		gfp = GFP_ATOMIC;
	else if (flags & RPC_TASK_ASYNC)
		gfp = GFP_RPC;
	else
		gfp = GFP_KERNEL;

	do {
		if ((buffer = (u32 *) kmalloc(size, gfp)) != NULL) {
			dprintk("RPC: allocated buffer %p\n", buffer);
			return buffer;
		}
		if ((flags & RPC_TASK_SWAPPER) && !swap_buffer_used++) {
			dprintk("RPC: used last-ditch swap buffer\n");
			return swap_buffer;
		}
		if (flags & RPC_TASK_ASYNC)
			return NULL;
		current->state = TASK_INTERRUPTIBLE;
		schedule_timeout(HZ>>4);
	} while (!signalled());

	return NULL;
}
void
rpc_free(void *buffer)
{
	if (buffer != swap_buffer) {
		kfree(buffer);
		return;
	}
	swap_buffer_used = 0;
}
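/*
 * Illustrative pairing (sketch only): the flags passed in select the
 * allocation policy described above, and every buffer must go back
 * through rpc_free(), which also knows how to retire the last-ditch
 * swap_buffer.
 *
 *	buffer = rpc_allocate(task->tk_flags, bufsiz);
 *	...
 *	rpc_free(buffer);
 */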
/*
 * Creation and deletion of RPC task structures.
 */
inline void
rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt,
				rpc_action callback, int flags)
{
	memset(task, 0, sizeof(*task));
	task->tk_client = clnt;
	task->tk_flags  = RPC_TASK_RUNNING | flags;
	task->tk_exit   = callback;
	init_waitqueue_head(&task->tk_wait);
	if (current->uid != current->fsuid || current->gid != current->fsgid)
		task->tk_flags |= RPC_TASK_SETUID;

	/* Initialize retry counters */
	task->tk_garb_retry = 2;
	task->tk_cred_retry = 2;
	task->tk_suid_retry = 1;

	/* Add to global list of all tasks */
	task->tk_next_task = all_tasks;
	task->tk_prev_task = NULL;
	if (all_tasks)
		all_tasks->tk_prev_task = task;
	all_tasks = task;

	task->tk_magic = 0xf00baa;
	task->tk_pid = rpc_task_id++;

	dprintk("RPC: %4d new task procpid %d\n", task->tk_pid,
				current->pid);
}
/*
 * Create a new task for the specified client. We have to
 * clean up after an allocation failure, as the client may
 * have specified "oneshot".
 */
struct rpc_task *
rpc_new_task(struct rpc_clnt *clnt, rpc_action callback, int flags)
{
	struct rpc_task	*task;

	task = (struct rpc_task *) rpc_allocate(flags, sizeof(*task));
	if (!task)
		goto cleanup;

	rpc_init_task(task, clnt, callback, flags);

	dprintk("RPC: %4d allocated task\n", task->tk_pid);
	task->tk_flags |= RPC_TASK_DYNAMIC;
out:
	return task;

cleanup:
	/* Check whether to release the client */
	if (clnt) {
		printk("rpc_new_task: failed, users=%d, oneshot=%d\n",
			clnt->cl_users, clnt->cl_oneshot);
		clnt->cl_users++; /* pretend we were used ... */
		rpc_release_client(clnt);
	}
	goto out;
}
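/*
 * Rough caller sketch (the callback name is an assumption made up for
 * illustration): asynchronous users allocate a dynamic task and hand it
 * to the scheduler; RPC_TASK_DYNAMIC makes rpc_release_task() free it
 * again once it has run to completion.
 *
 *	task = rpc_new_task(clnt, my_exit_callback, RPC_TASK_ASYNC);
 *	if (task != NULL)
 *		rpc_execute(task);
 */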
void
rpc_release_task(struct rpc_task *task)
{
	struct rpc_task	*next, *prev;

	dprintk("RPC: %4d release task\n", task->tk_pid);

	/* Remove from global task list */
	prev = task->tk_prev_task;
	next = task->tk_next_task;
	if (next)
		next->tk_prev_task = prev;
	if (prev)
		prev->tk_next_task = next;
	else
		all_tasks = next;

	/* Release resources */
	rpcauth_releasecred(task);
	if (task->tk_buffer) {
		rpc_free(task->tk_buffer);
		task->tk_buffer = NULL;
	}
	if (task->tk_client) {
		rpc_release_client(task->tk_client);
		task->tk_client = NULL;
	}

	if (task->tk_flags & RPC_TASK_DYNAMIC) {
		dprintk("RPC: %4d freeing task\n", task->tk_pid);
		task->tk_flags &= ~RPC_TASK_DYNAMIC;
		rpc_free(task);
	}
}
/*
 * Handling of RPC child tasks.
 * We can't simply call wake_up(parent) here, because the
 * parent task may already have gone away.
 */
static inline struct rpc_task *
rpc_find_parent(struct rpc_task *child)
{
	struct rpc_task	*temp, *parent;

	parent = (struct rpc_task *) child->tk_calldata;
	for (temp = childq.task; temp; temp = temp->tk_next) {
		if (temp == parent)
			return parent;
	}
	return NULL;
}
static void
rpc_child_exit(struct rpc_task *child)
{
	struct rpc_task	*parent;

	if ((parent = rpc_find_parent(child)) != NULL) {
		parent->tk_status = child->tk_status;
		rpc_wake_up_task(parent);
	}
	rpc_release_task(child);
}
/*
 * Note: rpc_new_task releases the client after a failure.
 */
struct rpc_task *
rpc_new_child(struct rpc_clnt *clnt, struct rpc_task *parent)
{
	struct rpc_task	*task;

	task = rpc_new_task(clnt, NULL, RPC_TASK_ASYNC | RPC_TASK_CHILD);
	if (!task)
		goto fail;
	task->tk_exit = rpc_child_exit;
	task->tk_calldata = parent;
	return task;

fail:
	parent->tk_status = -ENOMEM;
	return NULL;
}
void
rpc_run_child(struct rpc_task *task, struct rpc_task *child, rpc_action func)
{
	unsigned long	oldflags;

	save_flags(oldflags); cli();
	rpc_make_runnable(child);
	restore_flags(oldflags);
	/* N.B. Is it possible for the child to have already finished? */
	rpc_sleep_on(&childq, task, func, NULL);
}
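/*
 * Parent/child pattern, sketched for illustration (the names "pmap_clnt"
 * and "my_bind_done" are assumptions, not identifiers from this file):
 * the parent creates a child task, starts it, and sleeps on childq until
 * rpc_child_exit() copies the child's status back and wakes it.
 *
 *	child = rpc_new_child(pmap_clnt, task);
 *	if (child != NULL)
 *		rpc_run_child(task, child, my_bind_done);
 */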
/*
 * Kill all tasks for the given client.
 * XXX: kill their descendants as well?
 */
void
rpc_killall_tasks(struct rpc_clnt *clnt)
{
	struct rpc_task	**q, *rovr;

	dprintk("RPC: killing all tasks for client %p\n", clnt);
	/* N.B. Why bother to inhibit? Nothing blocks here ... */
	rpc_inhibit++;
	for (q = &all_tasks; (rovr = *q); q = &rovr->tk_next_task) {
		if (!clnt || rovr->tk_client == clnt) {
			rovr->tk_flags |= RPC_TASK_KILLED;
			rpc_exit(rovr, -EIO);
			rpc_wake_up_task(rovr);
		}
	}
	rpc_inhibit--;
}
static DECLARE_MUTEX_LOCKED(rpciod_running);
/*
 * This is the rpciod kernel thread.
 */
static int
rpciod(void *ptr)
{
	wait_queue_head_t *assassin = (wait_queue_head_t *) ptr;
	unsigned long	oldflags;
	int		rounds = 0;

	/*
	 * Let our maker know we're running ...
	 */
	rpciod_pid = current->pid;
	up(&rpciod_running);

	spin_lock_irq(&current->sigmask_lock);
	siginitsetinv(&current->blocked, sigmask(SIGKILL));
	recalc_sigpending(current);
	spin_unlock_irq(&current->sigmask_lock);

	current->session = 1;
	sprintf(current->comm, "rpciod");

	dprintk("RPC: rpciod starting (pid %d)\n", rpciod_pid);
	while (rpciod_users) {
		if (signalled()) {
			rpciod_killall();
			flush_signals(current);
		}
		__rpc_schedule();

		if (++rounds >= 64) {	/* safeguard */
			schedule();
			rounds = 0;
		}
		save_flags(oldflags); cli();
		dprintk("RPC: rpciod running checking dispatch\n");
		rpciod_tcp_dispatcher();

		if (!schedq.task) {
			dprintk("RPC: rpciod back to sleep\n");
			interruptible_sleep_on(&rpciod_idle);
			dprintk("RPC: switch to rpciod\n");
			rpciod_tcp_dispatcher();
			rounds = 0;
		}
		restore_flags(oldflags);
	}

	dprintk("RPC: rpciod shutdown commences\n");
	if (all_tasks) {
		printk(KERN_ERR "rpciod: active tasks at shutdown?!\n");
		rpciod_killall();
	}

	rpciod_pid = 0;
	wake_up(assassin);

	dprintk("RPC: rpciod exiting\n");
	return 0;
}
static void
rpciod_killall(void)
{
	unsigned long flags;

	while (all_tasks) {
		current->sigpending = 0;
		rpc_killall_tasks(NULL);
		__rpc_schedule();
		if (all_tasks) {
			dprintk("rpciod_killall: waiting for tasks to exit\n");
			current->state = TASK_INTERRUPTIBLE;
			schedule_timeout(1);
		}
	}

	spin_lock_irqsave(&current->sigmask_lock, flags);
	recalc_sigpending(current);
	spin_unlock_irqrestore(&current->sigmask_lock, flags);
}
/*
 * Start up the rpciod process if it's not already running.
 */
int
rpciod_up(void)
{
	int error = 0;

	down(&rpciod_sema);
	dprintk("rpciod_up: pid %d, users %d\n", rpciod_pid, rpciod_users);
	rpciod_users++;
	if (rpciod_pid)
		goto out;
	/*
	 * If there's no pid, we should be the first user.
	 */
	if (rpciod_users > 1)
		printk(KERN_WARNING "rpciod_up: no pid, %d users??\n", rpciod_users);
	/*
	 * Create the rpciod thread and wait for it to start.
	 */
	error = kernel_thread(rpciod, &rpciod_killer, 0);
	if (error < 0) {
		printk(KERN_WARNING "rpciod_up: create thread failed, error=%d\n", error);
		rpciod_users--;
		goto out;
	}
	down(&rpciod_running);
	error = 0;
out:
	up(&rpciod_sema);
	return error;
}
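/*
 * Usage sketch (illustrative only): rpciod is reference counted, so every
 * successful rpciod_up() -- typically one per NFS mount -- must eventually
 * be balanced by a matching rpciod_down(); the last caller shuts the
 * daemon down.
 *
 *	if ((error = rpciod_up()) != 0)
 *		return error;
 *	...
 *	rpciod_down();
 */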
void
rpciod_down(void)
{
	unsigned long flags;

	down(&rpciod_sema);
	dprintk("rpciod_down pid %d sema %d\n", rpciod_pid, rpciod_users);
	if (rpciod_users) {
		if (--rpciod_users)
			goto out;
	} else
		printk(KERN_WARNING "rpciod_down: pid=%d, no users??\n", rpciod_pid);

	if (!rpciod_pid) {
		dprintk("rpciod_down: Nothing to do!\n");
		goto out;
	}

	kill_proc(rpciod_pid, SIGKILL, 1);
	/*
	 * Usually rpciod will exit very quickly, so we
	 * wait briefly before checking the process id.
	 */
	current->sigpending = 0;
	current->state = TASK_INTERRUPTIBLE;
	schedule_timeout(1);
	/*
	 * Display a message if we're going to wait longer.
	 */
	while (rpciod_pid) {
		dprintk("rpciod_down: waiting for pid %d to exit\n", rpciod_pid);
		if (signalled()) {
			dprintk("rpciod_down: caught signal\n");
			break;
		}
		interruptible_sleep_on(&rpciod_killer);
	}
	spin_lock_irqsave(&current->sigmask_lock, flags);
	recalc_sigpending(current);
	spin_unlock_irqrestore(&current->sigmask_lock, flags);
out:
	up(&rpciod_sema);
}
#include <linux/nfs_fs.h>
void rpc_show_tasks(void)
{
	struct rpc_task *t = all_tasks, *next;
	struct nfs_wreq *wreq;

	printk("-pid- proc flgs status -client- -prog- --rqstp- -timeout "
		"-rpcwait -action- --exit--\n");
	for (; t; t = next) {
		next = t->tk_next_task;
		printk("%05d %04d %04x %06d %8p %6d %8p %08ld %8s %8p %8p\n",
			t->tk_pid, t->tk_proc, t->tk_flags, t->tk_status,
			t->tk_client, t->tk_client->cl_prog,
			t->tk_rqstp, t->tk_timeout,
			t->tk_rpcwait ? rpc_qname(t->tk_rpcwait) : " <NULL> ",
			t->tk_action, t->tk_exit);

		if (!(t->tk_flags & RPC_TASK_NFSWRITE))
			continue;
		/* NFS write requests */
		wreq = (struct nfs_wreq *) t->tk_calldata;
		printk("     NFS: flgs=%08x, pid=%d, pg=%p, off=(%d, %d)\n",
			wreq->wb_flags, wreq->wb_pid, wreq->wb_page,
			wreq->wb_offset, wreq->wb_bytes);
		printk("     name=%s/%s\n",
			wreq->wb_file->f_dentry->d_parent->d_name.name,
			wreq->wb_file->f_dentry->d_name.name);
	}
}