/*
 * linux/net/sunrpc/rpcclnt.c
 *
 * This file contains the high-level RPC interface.
 * It is modeled as a finite state machine to support both synchronous
 * and asynchronous requests.
 *
 * -    RPC header generation and argument serialization.
 * -    Credential refresh.
 * -    TCP reconnect handling (when finished).
 * -    Retry of operation when it is suspected the operation failed because
 *      of uid squashing on the server, or when the credentials were stale
 *      and need to be refreshed, or when a packet was damaged in transit.
 *      This may have to be moved to the VFS layer.
 *
 * NB: BSD uses a more intelligent approach to guessing when a request
 * or reply has been lost by keeping the RTO estimate for each procedure.
 * We currently make do with a constant timeout value.
 *
 * Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com>
 * Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de>
 */
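
/*
 * For orientation, the normal forward path of a call through the state
 * machine below (a summary derived from this file; error paths loop
 * back to earlier states):
 *
 *      call_bind -> call_reserve -> call_reserveresult -> call_allocate
 *      -> call_encode -> call_transmit -> call_receive -> call_status
 *      -> call_decode
 */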
#include <asm/system.h>
#include <asm/segment.h>

#include <linux/types.h>
#include <linux/malloc.h>
#include <linux/utsname.h>

#include <linux/sunrpc/clnt.h>
#define RPC_SLACK_SPACE 1024    /* total overkill */
#ifdef RPC_DEBUG
# define RPCDBG_FACILITY RPCDBG_CALL
#endif
static DECLARE_WAIT_QUEUE_HEAD(destroy_wait);
static void call_bind(struct rpc_task *task);
static void call_reserve(struct rpc_task *task);
static void call_reserveresult(struct rpc_task *task);
static void call_allocate(struct rpc_task *task);
static void call_encode(struct rpc_task *task);
static void call_decode(struct rpc_task *task);
static void call_transmit(struct rpc_task *task);
static void call_receive(struct rpc_task *task);
static void call_status(struct rpc_task *task);
static void call_refresh(struct rpc_task *task);
static void call_refreshresult(struct rpc_task *task);
static void call_timeout(struct rpc_task *task);
static void call_reconnect(struct rpc_task *task);
static u32 *call_header(struct rpc_task *task);
static u32 *call_verify(struct rpc_task *task);
/*
 * Create an RPC client
 * FIXME: This should also take a flags argument (as in task->tk_flags).
 * It's called (among others) from pmap_create_client, which may in
 * turn be called by an async task. In this case, rpciod should not be
 * made to sleep too long.
 */
struct rpc_clnt *
rpc_create_client(struct rpc_xprt *xprt, char *servname,
                  struct rpc_program *program, u32 vers, int flavor)
{
        struct rpc_version *version;
        struct rpc_clnt *clnt = NULL;

        dprintk("RPC: creating %s client for %s (xprt %p)\n",
                program->name, servname, xprt);

        if (vers >= program->nrvers || !(version = program->version[vers]))
                goto out;

        clnt = (struct rpc_clnt *) rpc_allocate(0, sizeof(*clnt));
        if (!clnt)
                goto out_no_clnt;
        memset(clnt, 0, sizeof(*clnt));

        clnt->cl_xprt     = xprt;
        clnt->cl_procinfo = version->procs;
        clnt->cl_maxproc  = version->nrprocs;
        clnt->cl_server   = servname;
        clnt->cl_protname = program->name;
        clnt->cl_port     = xprt->addr.sin_port;
        clnt->cl_prog     = program->number;
        clnt->cl_vers     = version->number;
        clnt->cl_prot     = IPPROTO_UDP;
        clnt->cl_stats    = program->stats;
        clnt->cl_bindwait = RPC_INIT_WAITQ("bindwait");

        if (!clnt->cl_port)
                clnt->cl_autobind = 1;

        if (!rpcauth_create(flavor, clnt))
                goto out_no_auth;

        /* save the nodename */
        clnt->cl_nodelen = strlen(system_utsname.nodename);
        if (clnt->cl_nodelen > UNX_MAXNODENAME)
                clnt->cl_nodelen = UNX_MAXNODENAME;
        memcpy(clnt->cl_nodename, system_utsname.nodename, clnt->cl_nodelen);
out:
        return clnt;

out_no_clnt:
        printk("RPC: out of memory in rpc_create_client\n");
        goto out;
out_no_auth:
        printk("RPC: Couldn't create auth handle (flavor %d)\n",
                flavor);
        rpc_free(clnt);
        clnt = NULL;
        goto out;
}
/*
 * Properly shut down an RPC client, terminating all outstanding
 * requests. Note that we must be certain that cl_oneshot and
 * cl_dead are cleared, or else the client would be destroyed
 * when the last task releases it.
 */
int
rpc_shutdown_client(struct rpc_clnt *clnt)
{
        dprintk("RPC: shutting down %s client for %s\n",
                clnt->cl_protname, clnt->cl_server);
        while (clnt->cl_users) {
                printk("rpc_shutdown_client: client %s, tasks=%d\n",
                        clnt->cl_protname, clnt->cl_users);
                /* Don't let rpc_release_client destroy us */
                clnt->cl_oneshot = 0;
                clnt->cl_dead = 0;
                rpc_killall_tasks(clnt);
                sleep_on(&destroy_wait);
        }
        return rpc_destroy_client(clnt);
}
/*
 * Delete an RPC client
 */
int
rpc_destroy_client(struct rpc_clnt *clnt)
{
        dprintk("RPC: destroying %s client for %s\n",
                clnt->cl_protname, clnt->cl_server);

        if (clnt->cl_auth) {
                rpcauth_destroy(clnt->cl_auth);
                clnt->cl_auth = NULL;
        }
        if (clnt->cl_xprt) {
                xprt_destroy(clnt->cl_xprt);
                clnt->cl_xprt = NULL;
        }
        rpc_free(clnt);
        return 0;
}
/*
 * Release an RPC client
 */
void
rpc_release_client(struct rpc_clnt *clnt)
{
        dprintk("RPC: rpc_release_client(%p, %d)\n",
                clnt, clnt->cl_users);
        if (clnt->cl_users) {
                if (--(clnt->cl_users) > 0)
                        return;
        } else
                printk("rpc_release_client: %s client already free??\n",
                        clnt->cl_protname);

        wake_up(&destroy_wait);
        if (clnt->cl_oneshot || clnt->cl_dead)
                rpc_destroy_client(clnt);
}
/*
 * Default callback for async RPC calls
 */
static void
rpc_default_callback(struct rpc_task *task)
{
        rpc_release_task(task);
}
/*
 * Export the signal mask handling for asynchronous code that
 * sleeps on RPC calls
 */
void rpc_clnt_sigmask(struct rpc_clnt *clnt, sigset_t *oldset)
{
        unsigned long sigallow = sigmask(SIGKILL);
        unsigned long irqflags;

        /* Turn off various signals */
        if (clnt->cl_intr) {
                struct k_sigaction *action = current->sig->action;
                if (action[SIGINT-1].sa.sa_handler == SIG_DFL)
                        sigallow |= sigmask(SIGINT);
                if (action[SIGQUIT-1].sa.sa_handler == SIG_DFL)
                        sigallow |= sigmask(SIGQUIT);
        }
        spin_lock_irqsave(&current->sigmask_lock, irqflags);
        *oldset = current->blocked;
        siginitsetinv(&current->blocked, sigallow & ~oldset->sig[0]);
        recalc_sigpending(current);
        spin_unlock_irqrestore(&current->sigmask_lock, irqflags);
}
void rpc_clnt_sigunmask(struct rpc_clnt *clnt, sigset_t *oldset)
{
        unsigned long irqflags;

        spin_lock_irqsave(&current->sigmask_lock, irqflags);
        current->blocked = *oldset;
        recalc_sigpending(current);
        spin_unlock_irqrestore(&current->sigmask_lock, irqflags);
}
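
/*
 * Illustrative pattern (a sketch; the sleeping code is hypothetical):
 * code that sleeps waiting for RPC completion brackets the sleep the
 * same way rpc_do_call() below brackets its call:
 *
 *      sigset_t oldset;
 *
 *      rpc_clnt_sigmask(clnt, &oldset);
 *      ... sleep until the RPC event arrives ...
 *      rpc_clnt_sigunmask(clnt, &oldset);
 */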
/*
 * New rpc_call implementation
 */
int
rpc_do_call(struct rpc_clnt *clnt, u32 proc, void *argp, void *resp,
                                int flags, rpc_action func, void *data)
{
        struct rpc_task my_task, *task = &my_task;
        sigset_t oldset;
        int async, status;

        /* If this client is slain all further I/O fails */
        if (clnt->cl_dead)
                return -EIO;

        rpc_clnt_sigmask(clnt, &oldset);

        status = -ENOMEM;
        /* Create/initialize a new RPC task */
        if ((async = (flags & RPC_TASK_ASYNC)) != 0) {
                if (!func)
                        func = rpc_default_callback;
                if (!(task = rpc_new_task(clnt, func, flags)))
                        goto out;
                task->tk_calldata = data;
        } else {
                rpc_init_task(task, clnt, NULL, flags);
        }

        status = 0;
        /* Bind the user cred, set up the call info struct and
         * execute the task */
        if (rpcauth_lookupcred(task) != NULL) {
                rpc_call_setup(task, proc, argp, resp, 0);
                rpc_execute(task);
        } else
                async = 0;

        if (!async) {
                status = task->tk_status;
                rpc_release_task(task);
        }

out:
        rpc_clnt_sigunmask(clnt, &oldset);

        return status;
}
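
/*
 * Illustrative call sites (a sketch; rpc_call() and rpc_call_async()
 * are assumed to be the thin wrappers around rpc_do_call() declared in
 * linux/sunrpc/clnt.h, and NFSPROC_GETATTR stands in for any procedure):
 *
 *      status = rpc_call(clnt, NFSPROC_GETATTR, &arg, &res, 0);
 *
 * An async caller passes RPC_TASK_ASYNC and a callback function, and
 * collects the result in that callback instead of the return value.
 */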
void
rpc_call_setup(struct rpc_task *task, u32 proc,
                                void *argp, void *resp, int flags)
{
        task->tk_action = call_bind;
        task->tk_proc   = proc;
        task->tk_argp   = argp;
        task->tk_resp   = resp;
        task->tk_flags |= flags;

        /* Increment call count */
        rpcproc_count(task->tk_client, proc)++;
}
/*
 * Restart an (async) RPC call. Usually called from within the
 * exit handler.
 */
void
rpc_restart_call(struct rpc_task *task)
{
        if (task->tk_flags & RPC_TASK_KILLED) {
                rpc_release_task(task);
                return;
        }
        task->tk_action = call_bind;
        rpcproc_count(task->tk_client, task->tk_proc)++;
}
/*
 * 0. Get the server port number if not yet set
 */
static void
call_bind(struct rpc_task *task)
{
        struct rpc_clnt *clnt = task->tk_client;

        task->tk_action = call_reserve;
        if (!clnt->cl_port)
                rpc_getport(task, clnt);
}
/*
 * 1. Reserve an RPC call slot
 */
static void
call_reserve(struct rpc_task *task)
{
        struct rpc_clnt *clnt = task->tk_client;

        dprintk("RPC: %4d call_reserve\n", task->tk_pid);
        if (!clnt->cl_port) {
                printk(KERN_NOTICE "%s: couldn't bind to server %s - %s.\n",
                        clnt->cl_protname, clnt->cl_server,
                        clnt->cl_softrtry ? "giving up" : "retrying");
                if (!clnt->cl_softrtry) {
                        rpc_delay(task, 5*HZ);
                        return;
                }
                rpc_exit(task, -EIO);
                return;
        }
        if (!rpcauth_uptodatecred(task)) {
                task->tk_action = call_refresh;
                return;
        }
        task->tk_action = call_reserveresult;
        task->tk_timeout = clnt->cl_timeout.to_resrvval;
        task->tk_status = 0;
        clnt->cl_stats->rpccnt++;
        xprt_reserve(task);
}
/*
 * 1b. Grok the result of xprt_reserve()
 */
static void
call_reserveresult(struct rpc_task *task)
{
        dprintk("RPC: %4d call_reserveresult (status %d)\n",
                task->tk_pid, task->tk_status);
        /*
         * After a call to xprt_reserve(), we must have either
         * a request slot or else an error status.
         */
        if ((task->tk_status >= 0 && !task->tk_rqstp) ||
            (task->tk_status < 0 && task->tk_rqstp))
                printk("call_reserveresult: status=%d, request=%p??\n",
                        task->tk_status, task->tk_rqstp);

        if (task->tk_status >= 0) {
                task->tk_action = call_allocate;
                return;
        } else if (task->tk_status == -EAGAIN) {
                task->tk_timeout = task->tk_client->cl_timeout.to_resrvval;
                task->tk_status = 0;
                xprt_reserve(task);
                return;
        } else if (task->tk_status == -ETIMEDOUT) {
                dprintk("RPC: task timed out\n");
                task->tk_action = call_timeout;
                return;
        } else {
                task->tk_action = NULL;
        }

        if (!task->tk_rqstp) {
                printk("RPC: task has no request, exit EIO\n");
                rpc_exit(task, -EIO);
        }
}
/*
 * 2. Allocate the buffer. For details, see sched.c:rpc_malloc.
 *    (Note: buffer memory is freed in rpc_release_task).
 */
static void
call_allocate(struct rpc_task *task)
{
        struct rpc_clnt *clnt = task->tk_client;
        unsigned int bufsiz;

        dprintk("RPC: %4d call_allocate (status %d)\n",
                task->tk_pid, task->tk_status);

        task->tk_action = call_encode;
        if (task->tk_buffer)
                return;

        /* FIXME: compute buffer requirements more exactly using
         * auth->au_wslack */
        bufsiz = rpcproc_bufsiz(clnt, task->tk_proc) + RPC_SLACK_SPACE;

        if ((task->tk_buffer = rpc_malloc(task, bufsiz)) != NULL)
                return;
        printk("RPC: buffer allocation failed for task %p\n", task);

        if (!signalled()) {
                xprt_release(task);
                task->tk_action = call_reserve;
                rpc_delay(task, HZ>>4);
                return;
        }

        rpc_exit(task, -ERESTARTSYS);
}
/*
 * 3. Encode arguments of an RPC call
 */
static void
call_encode(struct rpc_task *task)
{
        struct rpc_clnt *clnt = task->tk_client;
        struct rpc_rqst *req = task->tk_rqstp;
        unsigned int bufsiz;
        kxdrproc_t encode;
        int status;
        u32 *p;

        dprintk("RPC: %4d call_encode (status %d)\n",
                task->tk_pid, task->tk_status);

        task->tk_action = call_transmit;

        /* Default buffer setup */
        bufsiz = rpcproc_bufsiz(clnt, task->tk_proc)+RPC_SLACK_SPACE;
        req->rq_svec[0].iov_base = task->tk_buffer;
        req->rq_svec[0].iov_len  = bufsiz;
        req->rq_slen             = 0;
        req->rq_snr              = 1;
        req->rq_rvec[0].iov_base = task->tk_buffer;
        req->rq_rvec[0].iov_len  = bufsiz;
        req->rq_rlen             = bufsiz;
        req->rq_rnr              = 1;

        if (task->tk_proc > clnt->cl_maxproc) {
                printk(KERN_WARNING "%s (vers %d): bad procedure number %d\n",
                        clnt->cl_protname, clnt->cl_vers, task->tk_proc);
                rpc_exit(task, -EIO);
                return;
        }

        /* Encode header and provided arguments */
        encode = rpcproc_encode(clnt, task->tk_proc);
        if (!(p = call_header(task))) {
                printk("RPC: call_header failed, exit EIO\n");
                rpc_exit(task, -EIO);
        } else
        if ((status = encode(req, p, task->tk_argp)) < 0) {
                printk(KERN_WARNING "%s: can't encode arguments: %d\n",
                        clnt->cl_protname, -status);
                rpc_exit(task, status);
        }
}
/*
 * 4. Transmit the RPC request
 */
static void
call_transmit(struct rpc_task *task)
{
        dprintk("RPC: %4d call_transmit (status %d)\n",
                task->tk_pid, task->tk_status);

        task->tk_action = call_receive;
        task->tk_status = 0;
        xprt_transmit(task);
}
/*
 * 5. Wait for the RPC reply
 */
static void
call_receive(struct rpc_task *task)
{
        dprintk("RPC: %4d call_receive (status %d)\n",
                task->tk_pid, task->tk_status);

        task->tk_action = call_status;
        /* In case of error, evaluate status */
        if (task->tk_status < 0)
                return;

        /* If we have no decode function, this means we're performing
         * a void call (a la lockd message passing). */
        if (!rpcproc_decode(task->tk_client, task->tk_proc)) {
                rpc_remove_wait_queue(task); /* remove from xprt_pending */
                task->tk_action = NULL;
                return;
        }

        xprt_receive(task);
}
/*
 * 6. Sort out the RPC call status
 */
static void
call_status(struct rpc_task *task)
{
        struct rpc_clnt *clnt = task->tk_client;
        struct rpc_rqst *req;
        int status = task->tk_status;

        dprintk("RPC: %4d call_status (status %d)\n",
                task->tk_pid, task->tk_status);

        if (status >= 0) {
                task->tk_action = call_decode;
        } else if (status == -ETIMEDOUT) {
                task->tk_action = call_timeout;
        } else if (status == -EAGAIN) {
                if (!(req = task->tk_rqstp))
                        task->tk_action = call_reserve;
                else if (!task->tk_buffer)
                        task->tk_action = call_allocate;
                else if (req->rq_damaged)
                        task->tk_action = call_encode;
                else
                        task->tk_action = call_transmit;
        } else if (status == -ENOTCONN) {
                task->tk_action = call_reconnect;
        } else if (status == -ECONNREFUSED && clnt->cl_autobind) {
                task->tk_action = call_bind;
        } else {
                printk("%s: RPC call returned error %d\n",
                        clnt->cl_protname, -status);
                task->tk_action = NULL;
        }
}
/*
 * 6a. Handle RPC timeout
 *     We do not release the request slot, so we keep using the
 *     same XID for all retransmits.
 */
static void
call_timeout(struct rpc_task *task)
{
        struct rpc_clnt *clnt = task->tk_client;
        struct rpc_rqst *req = task->tk_rqstp;

        if (req) {
                struct rpc_timeout *to = &req->rq_timeout;

                if (xprt_adjust_timeout(to)) {
                        dprintk("RPC: %4d call_timeout (minor timeo)\n",
                                task->tk_pid);
                        goto minor_timeout;
                }
                to->to_initval <<= 1;
                if (to->to_initval > to->to_maxval)
                        to->to_initval = to->to_maxval;
        }

        dprintk("RPC: %4d call_timeout (major timeo)\n", task->tk_pid);
        if (clnt->cl_softrtry) {
                if (clnt->cl_chatty && !task->tk_exit)
                        printk("%s: server %s not responding, timed out\n",
                                clnt->cl_protname, clnt->cl_server);
                rpc_exit(task, -EIO);
                return;
        }
        if (clnt->cl_chatty && !(task->tk_flags & RPC_CALL_MAJORSEEN)) {
                task->tk_flags |= RPC_CALL_MAJORSEEN;
                if (req)
                        printk("%s: server %s not responding, still trying\n",
                                clnt->cl_protname, clnt->cl_server);
                else
                        printk("%s: task %d can't get a request slot\n",
                                clnt->cl_protname, task->tk_pid);
        }
        if (clnt->cl_autobind)
                clnt->cl_port = 0;

minor_timeout:
        if (!clnt->cl_port) {
                task->tk_action = call_bind;
        } else if (!req) {
                task->tk_action = call_reserve;
        } else if (req->rq_damaged) {
                task->tk_action = call_encode;
                clnt->cl_stats->rpcretrans++;
        } else {
                task->tk_action = call_transmit;
                clnt->cl_stats->rpcretrans++;
        }
        task->tk_status = 0;
}
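
/*
 * Worked example of the backoff above (illustrative figures only,
 * assuming to_initval starts at the traditional NFS default of 0.7
 * seconds and to_maxval caps it at 60 seconds): each major timeout
 * doubles to_initval, so successive major retransmit intervals run
 * 0.7, 1.4, 2.8, 5.6, ... seconds until the cap is reached.
 */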
/*
 * 6b. Reconnect to the RPC server (TCP case)
 */
static void
call_reconnect(struct rpc_task *task)
{
        dprintk("RPC: %4d call_reconnect status %d\n",
                task->tk_pid, task->tk_status);
        if (task->tk_status == 0) {
                task->tk_action = call_status;
                task->tk_status = -EAGAIN;
                return;
        }
        task->tk_client->cl_stats->netreconn++;
        xprt_reconnect(task);
}
/*
 * 7. Decode the RPC reply
 */
static void
call_decode(struct rpc_task *task)
{
        struct rpc_clnt *clnt = task->tk_client;
        struct rpc_rqst *req = task->tk_rqstp;
        kxdrproc_t decode = rpcproc_decode(clnt, task->tk_proc);
        u32 *p;

        dprintk("RPC: %4d call_decode (status %d)\n",
                task->tk_pid, task->tk_status);

        if (clnt->cl_chatty && (task->tk_flags & RPC_CALL_MAJORSEEN)) {
                printk("%s: server %s OK\n",
                        clnt->cl_protname, clnt->cl_server);
                task->tk_flags &= ~RPC_CALL_MAJORSEEN;
        }

        if (task->tk_status < 12) {
                printk("%s: too small RPC reply size (%d bytes)\n",
                        clnt->cl_protname, task->tk_status);
                rpc_exit(task, -EIO);
                return;
        }

        /* Verify the RPC header */
        if (!(p = call_verify(task)))
                return;

        /*
         * The following is an NFS-specific hack to cater for setuid
         * processes whose uid is mapped to nobody on the server.
         */
        if (task->tk_client->cl_prog == 100003 &&
            (ntohl(*p) == NFSERR_ACCES || ntohl(*p) == NFSERR_PERM)) {
                if (RPC_IS_SETUID(task) && (task->tk_suid_retry)--) {
                        dprintk("RPC: %4d retry squashed uid\n", task->tk_pid);
                        task->tk_flags ^= RPC_CALL_REALUID;
                        task->tk_action = call_encode;
                        return;
                }
        }

        task->tk_action = NULL;
        task->tk_status = decode(req, p, task->tk_resp);
        dprintk("RPC: %4d call_decode result %d\n", task->tk_pid,
                task->tk_status);
}
/*
 * 8. Refresh the credentials if rejected by the server
 */
static void
call_refresh(struct rpc_task *task)
{
        dprintk("RPC: %4d call_refresh\n", task->tk_pid);

        xprt_release(task);     /* Must do to obtain new XID */
        task->tk_action = call_refreshresult;
        task->tk_status = 0;
        task->tk_client->cl_stats->rpcauthrefresh++;
        rpcauth_refreshcred(task);
}
/*
 * 8a. Process the results of a credential refresh
 */
static void
call_refreshresult(struct rpc_task *task)
{
        dprintk("RPC: %4d call_refreshresult (status %d)\n",
                task->tk_pid, task->tk_status);

        if (task->tk_status < 0) {
                task->tk_status = -EACCES;
                task->tk_action = NULL;
        } else
                task->tk_action = call_reserve;
}
/*
 * Call header serialization
 */
static u32 *
call_header(struct rpc_task *task)
{
        struct rpc_clnt *clnt = task->tk_client;
        struct rpc_xprt *xprt = clnt->cl_xprt;
        u32 *p = task->tk_buffer;

        /* FIXME: check buffer size? */
        if (xprt->stream)
                *p++ = 0;                       /* fill in later */
        *p++ = task->tk_rqstp->rq_xid;          /* XID */
        *p++ = htonl(RPC_CALL);                 /* CALL */
        *p++ = htonl(RPC_VERSION);              /* RPC version */
        *p++ = htonl(clnt->cl_prog);            /* program number */
        *p++ = htonl(clnt->cl_vers);            /* program version */
        *p++ = htonl(task->tk_proc);            /* procedure */
        return rpcauth_marshcred(task, p);
}
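
/*
 * For reference, the header marshalled above occupies consecutive
 * 32-bit words on the wire (a summary of the code above; the record
 * marker is only present on stream transports):
 *
 *      [record marker] [XID] [CALL] [RPC version 2] [prog] [vers] [proc]
 *
 * followed by the credential and verifier written by rpcauth_marshcred().
 */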
/*
 * Reply header verification
 */
static u32 *
call_verify(struct rpc_task *task)
{
        u32 *p = task->tk_buffer, n;

        p += 1; /* skip XID */

        if ((n = ntohl(*p++)) != RPC_REPLY) {
                printk("call_verify: not an RPC reply: %x\n", n);
                goto garbage;
        }
        if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) {
                int error = -EACCES;

                if ((n = ntohl(*p++)) != RPC_AUTH_ERROR) {
                        printk("call_verify: RPC call rejected: %x\n", n);
                } else
                switch ((n = ntohl(*p++))) {
                case RPC_AUTH_REJECTEDCRED:
                case RPC_AUTH_REJECTEDVERF:
                        if (!task->tk_cred_retry--)
                                break;
                        dprintk("RPC: %4d call_verify: retry stale creds\n",
                                task->tk_pid);
                        rpcauth_invalcred(task);
                        task->tk_action = call_refresh;
                        return NULL;
                case RPC_AUTH_BADCRED:
                case RPC_AUTH_BADVERF:
                        /* possibly garbled cred/verf? */
                        if (!task->tk_garb_retry--)
                                break;
                        dprintk("RPC: %4d call_verify: retry garbled creds\n",
                                task->tk_pid);
                        task->tk_action = call_encode;
                        return NULL;
                case RPC_AUTH_TOOWEAK:
                        printk("call_verify: server requires stronger "
                               "authentication.\n");
                        break;
                default:
                        printk("call_verify: unknown auth error: %x\n", n);
                        error = -EIO;
                }
                dprintk("RPC: %4d call_verify: call rejected %d\n",
                        task->tk_pid, n);
                rpc_exit(task, error);
                return NULL;
        }
        if (!(p = rpcauth_checkverf(task, p))) {
                printk("call_verify: auth check failed\n");
                goto garbage;           /* bad verifier, retry */
        }
        switch ((n = ntohl(*p++))) {
        case RPC_SUCCESS:
                return p;
        case RPC_GARBAGE_ARGS:
                break;                  /* retry */
        default:
                printk("call_verify: server accept status: %x\n", n);
                /* also retry */
        }

garbage:
        dprintk("RPC: %4d call_verify: server saw garbage\n", task->tk_pid);
        task->tk_client->cl_stats->rpcgarbage++;
        if (task->tk_garb_retry--) {
                printk("RPC: garbage, retrying %4d\n", task->tk_pid);
                task->tk_action = call_encode;
                return NULL;
        }
        printk("RPC: garbage, exit EIO\n");
        rpc_exit(task, -EIO);
        return NULL;
}