/*
 *  linux/net/sunrpc/clnt.c
 *
 *  This file contains the high-level RPC interface.
 *  It is modeled as a finite state machine to support both synchronous
 *  and asynchronous requests.
 *
 *  -	RPC header generation and argument serialization.
 *  -	Credential refresh.
 *  -	TCP connect handling.
 *  -	Retry of operation when it is suspected the operation failed because
 *	of uid squashing on the server, or when the credentials were stale
 *	and need to be refreshed, or when a packet was damaged in transit.
 *	This may have to be moved to the VFS layer.
 *
 *  Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com>
 *  Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de>
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kallsyms.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/utsname.h>
#include <linux/workqueue.h>
#include <linux/in6.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include <linux/sunrpc/metrics.h>
#include <linux/sunrpc/bc_xprt.h>
#include <trace/events/sunrpc.h>

# define RPCDBG_FACILITY	RPCDBG_CALL

#define dprint_status(t)					\
	dprintk("RPC: %5u %s (status %d)\n", t->tk_pid,		\
			__func__, t->tk_status)
/*
 * All RPC clients are linked into this list
 */

static DECLARE_WAIT_QUEUE_HEAD(destroy_wait);

static void	call_start(struct rpc_task *task);
static void	call_reserve(struct rpc_task *task);
static void	call_reserveresult(struct rpc_task *task);
static void	call_allocate(struct rpc_task *task);
static void	call_decode(struct rpc_task *task);
static void	call_bind(struct rpc_task *task);
static void	call_bind_status(struct rpc_task *task);
static void	call_transmit(struct rpc_task *task);
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
static void	call_bc_transmit(struct rpc_task *task);
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
static void	call_status(struct rpc_task *task);
static void	call_transmit_status(struct rpc_task *task);
static void	call_refresh(struct rpc_task *task);
static void	call_refreshresult(struct rpc_task *task);
static void	call_timeout(struct rpc_task *task);
static void	call_connect(struct rpc_task *task);
static void	call_connect_status(struct rpc_task *task);

static __be32	*rpc_encode_header(struct rpc_task *task);
static __be32	*rpc_verify_header(struct rpc_task *task);
static int	rpc_ping(struct rpc_clnt *clnt);
static void rpc_register_client(struct rpc_clnt *clnt)
	struct net *net = rpc_net_ns(clnt);
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

	spin_lock(&sn->rpc_client_lock);
	list_add(&clnt->cl_clients, &sn->all_clients);
	spin_unlock(&sn->rpc_client_lock);

static void rpc_unregister_client(struct rpc_clnt *clnt)
	struct net *net = rpc_net_ns(clnt);
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

	spin_lock(&sn->rpc_client_lock);
	list_del(&clnt->cl_clients);
	spin_unlock(&sn->rpc_client_lock);

static void __rpc_clnt_remove_pipedir(struct rpc_clnt *clnt)
	rpc_remove_client_dir(clnt);

static void rpc_clnt_remove_pipedir(struct rpc_clnt *clnt)
	struct net *net = rpc_net_ns(clnt);
	struct super_block *pipefs_sb;

	pipefs_sb = rpc_get_sb_net(net);
		__rpc_clnt_remove_pipedir(clnt);
static struct dentry *rpc_setup_pipedir_sb(struct super_block *sb,
		struct rpc_clnt *clnt)
	static uint32_t clntid;
	const char *dir_name = clnt->cl_program->pipe_dir_name;
	struct dentry *dir, *dentry;

	dir = rpc_d_lookup_sb(sb, dir_name);
		pr_info("RPC: pipefs directory doesn't exist: %s\n", dir_name);
		snprintf(name, sizeof(name), "clnt%x", (unsigned int)clntid++);
		name[sizeof(name) - 1] = '\0';
		dentry = rpc_create_client_dir(dir, name, clnt);
		if (dentry == ERR_PTR(-EEXIST))
		printk(KERN_INFO "RPC: Couldn't create pipefs entry"
				" %s/%s, error %ld\n",
				dir_name, name, PTR_ERR(dentry));

rpc_setup_pipedir(struct super_block *pipefs_sb, struct rpc_clnt *clnt)
	struct dentry *dentry;

	if (clnt->cl_program->pipe_dir_name != NULL) {
		dentry = rpc_setup_pipedir_sb(pipefs_sb, clnt);
			return PTR_ERR(dentry);
static int rpc_clnt_skip_event(struct rpc_clnt *clnt, unsigned long event)
	if (clnt->cl_program->pipe_dir_name == NULL)
	case RPC_PIPEFS_MOUNT:
		if (clnt->cl_pipedir_objects.pdh_dentry != NULL)
		if (atomic_read(&clnt->cl_count) == 0)
	case RPC_PIPEFS_UMOUNT:
		if (clnt->cl_pipedir_objects.pdh_dentry == NULL)

static int __rpc_clnt_handle_event(struct rpc_clnt *clnt, unsigned long event,
				   struct super_block *sb)
	struct dentry *dentry;

	case RPC_PIPEFS_MOUNT:
		dentry = rpc_setup_pipedir_sb(sb, clnt);
			return PTR_ERR(dentry);
	case RPC_PIPEFS_UMOUNT:
		__rpc_clnt_remove_pipedir(clnt);
		printk(KERN_ERR "%s: unknown event: %ld\n", __func__, event);

static int __rpc_pipefs_event(struct rpc_clnt *clnt, unsigned long event,
			      struct super_block *sb)
	for (;; clnt = clnt->cl_parent) {
		if (!rpc_clnt_skip_event(clnt, event))
			error = __rpc_clnt_handle_event(clnt, event, sb);
		if (error || clnt == clnt->cl_parent)
static struct rpc_clnt *rpc_get_client_for_event(struct net *net, int event)
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	struct rpc_clnt *clnt;

	spin_lock(&sn->rpc_client_lock);
	list_for_each_entry(clnt, &sn->all_clients, cl_clients) {
		if (rpc_clnt_skip_event(clnt, event))
		spin_unlock(&sn->rpc_client_lock);
	spin_unlock(&sn->rpc_client_lock);

static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event,
	struct super_block *sb = ptr;
	struct rpc_clnt *clnt;

	while ((clnt = rpc_get_client_for_event(sb->s_fs_info, event))) {
		error = __rpc_pipefs_event(clnt, event, sb);

static struct notifier_block rpc_clients_block = {
	.notifier_call	= rpc_pipefs_event,
	.priority	= SUNRPC_PIPEFS_RPC_PRIO,

int rpc_clients_notifier_register(void)
	return rpc_pipefs_notifier_register(&rpc_clients_block);

void rpc_clients_notifier_unregister(void)
	return rpc_pipefs_notifier_unregister(&rpc_clients_block);
static struct rpc_xprt *rpc_clnt_set_transport(struct rpc_clnt *clnt,
		struct rpc_xprt *xprt,
		const struct rpc_timeout *timeout)
	struct rpc_xprt *old;

	spin_lock(&clnt->cl_lock);
	old = rcu_dereference_protected(clnt->cl_xprt,
			lockdep_is_held(&clnt->cl_lock));

	if (!xprt_bound(xprt))
		clnt->cl_autobind = 1;

	clnt->cl_timeout = timeout;
	rcu_assign_pointer(clnt->cl_xprt, xprt);
	spin_unlock(&clnt->cl_lock);

static void rpc_clnt_set_nodename(struct rpc_clnt *clnt, const char *nodename)
	clnt->cl_nodelen = strlen(nodename);
	if (clnt->cl_nodelen > UNX_MAXNODENAME)
		clnt->cl_nodelen = UNX_MAXNODENAME;
	memcpy(clnt->cl_nodename, nodename, clnt->cl_nodelen);
static int rpc_client_register(struct rpc_clnt *clnt,
			       rpc_authflavor_t pseudoflavor,
			       const char *client_name)
	struct rpc_auth_create_args auth_args = {
		.pseudoflavor = pseudoflavor,
		.target_name = client_name,
	struct rpc_auth *auth;
	struct net *net = rpc_net_ns(clnt);
	struct super_block *pipefs_sb;

	pipefs_sb = rpc_get_sb_net(net);
		err = rpc_setup_pipedir(pipefs_sb, clnt);

	rpc_register_client(clnt);

	auth = rpcauth_create(&auth_args, clnt);
		dprintk("RPC: Couldn't create auth handle (flavor %u)\n",

	pipefs_sb = rpc_get_sb_net(net);
	rpc_unregister_client(clnt);
	__rpc_clnt_remove_pipedir(clnt);

static DEFINE_IDA(rpc_clids);

static int rpc_alloc_clid(struct rpc_clnt *clnt)
	clid = ida_simple_get(&rpc_clids, 0, 0, GFP_KERNEL);
	clnt->cl_clid = clid;

static void rpc_free_clid(struct rpc_clnt *clnt)
	ida_simple_remove(&rpc_clids, clnt->cl_clid);
static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args,
		struct rpc_xprt *xprt,
		struct rpc_clnt *parent)
	const struct rpc_program *program = args->program;
	const struct rpc_version *version;
	struct rpc_clnt *clnt = NULL;
	const struct rpc_timeout *timeout;

	/* sanity check the name before trying to print it */
	dprintk("RPC: creating %s client for %s (xprt %p)\n",
			program->name, args->servername, xprt);

	if (args->version >= program->nrvers)
	version = program->version[args->version];

	clnt = kzalloc(sizeof(*clnt), GFP_KERNEL);
	clnt->cl_parent = parent ? : clnt;

	err = rpc_alloc_clid(clnt);

	clnt->cl_procinfo = version->procs;
	clnt->cl_maxproc  = version->nrprocs;
	clnt->cl_prog     = args->prognumber ? : program->number;
	clnt->cl_vers     = version->number;
	clnt->cl_stats    = program->stats;
	clnt->cl_metrics  = rpc_alloc_iostats(clnt);
	rpc_init_pipe_dir_head(&clnt->cl_pipedir_objects);
	if (clnt->cl_metrics == NULL)
	clnt->cl_program  = program;
	INIT_LIST_HEAD(&clnt->cl_tasks);
	spin_lock_init(&clnt->cl_lock);

	timeout = xprt->timeout;
	if (args->timeout != NULL) {
		memcpy(&clnt->cl_timeout_default, args->timeout,
				sizeof(clnt->cl_timeout_default));
		timeout = &clnt->cl_timeout_default;

	rpc_clnt_set_transport(clnt, xprt, timeout);

	clnt->cl_rtt = &clnt->cl_rtt_default;
	rpc_init_rtt(&clnt->cl_rtt_default, clnt->cl_timeout->to_initval);

	atomic_set(&clnt->cl_count, 1);

	/* save the nodename */
	rpc_clnt_set_nodename(clnt, utsname()->nodename);

	err = rpc_client_register(clnt, args->authflavor, args->client_name);
	atomic_inc(&parent->cl_count);

	rpc_free_iostats(clnt->cl_metrics);
/**
 * rpc_create - create an RPC client and transport with one call
 * @args: rpc_clnt create argument structure
 *
 * Creates and initializes an RPC transport and an RPC client.
 *
 * It can ping the server in order to determine if it is up, and to see if
 * it supports this program and version.  RPC_CLNT_CREATE_NOPING disables
 * this behavior so asynchronous tasks can also use rpc_create.
 */
struct rpc_clnt *rpc_create(struct rpc_create_args *args)
	struct rpc_xprt *xprt;
	struct rpc_clnt *clnt;
	struct xprt_create xprtargs = {
		.ident = args->protocol,
		.srcaddr = args->saddress,
		.dstaddr = args->address,
		.addrlen = args->addrsize,
		.servername = args->servername,
		.bc_xprt = args->bc_xprt,

	if (args->flags & RPC_CLNT_CREATE_INFINITE_SLOTS)
		xprtargs.flags |= XPRT_CREATE_INFINITE_SLOTS;
	if (args->flags & RPC_CLNT_CREATE_NO_IDLE_TIMEOUT)
		xprtargs.flags |= XPRT_CREATE_NO_IDLE_TIMEOUT;
	/*
	 * If the caller chooses not to specify a hostname, whip
	 * up a string representation of the passed-in address.
	 */
	if (xprtargs.servername == NULL) {
		struct sockaddr_un *sun =
				(struct sockaddr_un *)args->address;
		struct sockaddr_in *sin =
				(struct sockaddr_in *)args->address;
		struct sockaddr_in6 *sin6 =
				(struct sockaddr_in6 *)args->address;

		servername[0] = '\0';
		switch (args->address->sa_family) {
			snprintf(servername, sizeof(servername), "%s",
			snprintf(servername, sizeof(servername), "%pI4",
				 &sin->sin_addr.s_addr);
			snprintf(servername, sizeof(servername), "%pI6",
			/* caller wants default server name, but
			 * address family isn't recognized. */
			return ERR_PTR(-EINVAL);
		xprtargs.servername = servername;

	xprt = xprt_create_transport(&xprtargs);
		return (struct rpc_clnt *)xprt;

	/*
	 * By default, kernel RPC client connects from a reserved port.
	 * CAP_NET_BIND_SERVICE will not be set for unprivileged requesters,
	 * but it is always enabled for rpciod, which handles the connect
	 */
	if (args->flags & RPC_CLNT_CREATE_NONPRIVPORT)

	clnt = rpc_new_client(args, xprt, NULL);

	if (!(args->flags & RPC_CLNT_CREATE_NOPING)) {
		int err = rpc_ping(clnt);
			rpc_shutdown_client(clnt);

	clnt->cl_softrtry = 1;
	if (args->flags & RPC_CLNT_CREATE_HARDRTRY)
		clnt->cl_softrtry = 0;

	if (args->flags & RPC_CLNT_CREATE_AUTOBIND)
		clnt->cl_autobind = 1;
	if (args->flags & RPC_CLNT_CREATE_DISCRTRY)
		clnt->cl_discrtry = 1;
	if (!(args->flags & RPC_CLNT_CREATE_QUIET))
EXPORT_SYMBOL_GPL(rpc_create);
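
/*
 * Illustrative sketch (added commentary, not part of the original file):
 * a typical caller fills in struct rpc_create_args and calls rpc_create().
 * The program structure, server address and flag choices below are
 * hypothetical examples, not values taken from this file.
 *
 *	struct rpc_create_args args = {
 *		.net		= net,
 *		.protocol	= XPRT_TRANSPORT_TCP,
 *		.address	= (struct sockaddr *)&addr,
 *		.addrsize	= sizeof(addr),
 *		.servername	= "example-server",
 *		.program	= &example_rpc_program,	(hypothetical)
 *		.version	= 3,
 *		.authflavor	= RPC_AUTH_UNIX,
 *		.flags		= RPC_CLNT_CREATE_NOPING,
 *	};
 *	struct rpc_clnt *clnt = rpc_create(&args);
 *
 *	if (IS_ERR(clnt))
 *		return PTR_ERR(clnt);
 */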
/*
 * This function clones the RPC client structure. It allows us to share the
 * same transport while varying parameters such as the authentication
 */
static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args,
					   struct rpc_clnt *clnt)
	struct rpc_xprt *xprt;
	struct rpc_clnt *new;

	xprt = xprt_get(rcu_dereference(clnt->cl_xprt));

	args->servername = xprt->servername;

	new = rpc_new_client(args, xprt, clnt);

	/* Turn off autobind on clones */
	new->cl_autobind = 0;
	new->cl_softrtry = clnt->cl_softrtry;
	new->cl_discrtry = clnt->cl_discrtry;
	new->cl_chatty = clnt->cl_chatty;

	dprintk("RPC: %s: returned error %d\n", __func__, err);

/**
 * rpc_clone_client - Clone an RPC client structure
 *
 * @clnt: RPC client whose parameters are copied
 *
 * Returns a fresh RPC client or an ERR_PTR.
 */
struct rpc_clnt *rpc_clone_client(struct rpc_clnt *clnt)
	struct rpc_create_args args = {
		.program	= clnt->cl_program,
		.prognumber	= clnt->cl_prog,
		.version	= clnt->cl_vers,
		.authflavor	= clnt->cl_auth->au_flavor,
	return __rpc_clone_client(&args, clnt);
EXPORT_SYMBOL_GPL(rpc_clone_client);
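
/*
 * Illustrative sketch (added commentary, not part of the original file):
 * cloning lets a second client share the parent's transport while keeping
 * its own authentication state.  "clnt" is assumed to be a client the
 * caller already owns.
 *
 *	struct rpc_clnt *clone = rpc_clone_client(clnt);
 *
 *	if (IS_ERR(clone))
 *		return PTR_ERR(clone);
 *	...
 *	rpc_shutdown_client(clone);
 */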
/**
 * rpc_clone_client_set_auth - Clone an RPC client structure and set its auth
 *
 * @clnt: RPC client whose parameters are copied
 * @flavor: security flavor for new client
 *
 * Returns a fresh RPC client or an ERR_PTR.
 */
rpc_clone_client_set_auth(struct rpc_clnt *clnt, rpc_authflavor_t flavor)
	struct rpc_create_args args = {
		.program	= clnt->cl_program,
		.prognumber	= clnt->cl_prog,
		.version	= clnt->cl_vers,
		.authflavor	= flavor,
	return __rpc_clone_client(&args, clnt);
EXPORT_SYMBOL_GPL(rpc_clone_client_set_auth);
/**
 * rpc_switch_client_transport: switch the RPC transport on the fly
 * @clnt: pointer to a struct rpc_clnt
 * @args: pointer to the new transport arguments
 * @timeout: pointer to the new timeout parameters
 *
 * This function allows the caller to switch the RPC transport for the
 * rpc_clnt structure 'clnt' to allow it to connect to a mirrored NFS
 * server, for instance.  It assumes that the caller has ensured that
 * there are no active RPC tasks by using some form of locking.
 *
 * Returns zero if "clnt" is now using the new xprt.  Otherwise a
 * negative errno is returned, and "clnt" continues to use the old
 */
int rpc_switch_client_transport(struct rpc_clnt *clnt,
		struct xprt_create *args,
		const struct rpc_timeout *timeout)
	const struct rpc_timeout *old_timeo;
	rpc_authflavor_t pseudoflavor;
	struct rpc_xprt *xprt, *old;
	struct rpc_clnt *parent;

	xprt = xprt_create_transport(args);
		dprintk("RPC: failed to create new xprt for clnt %p\n",
		return PTR_ERR(xprt);

	pseudoflavor = clnt->cl_auth->au_flavor;

	old_timeo = clnt->cl_timeout;
	old = rpc_clnt_set_transport(clnt, xprt, timeout);

	rpc_unregister_client(clnt);
	__rpc_clnt_remove_pipedir(clnt);

	/*
	 * A new transport was created.  "clnt" therefore
	 * becomes the root of a new cl_parent tree.  clnt's
	 * children, if it has any, still point to the old xprt.
	 */
	parent = clnt->cl_parent;
	clnt->cl_parent = clnt;

	/*
	 * The old rpc_auth cache cannot be re-used.  GSS
	 * contexts in particular are between a single
	 */
	err = rpc_client_register(clnt, pseudoflavor, NULL);

	rpc_release_client(parent);

	dprintk("RPC: replaced xprt for clnt %p\n", clnt);

	rpc_clnt_set_transport(clnt, old, old_timeo);
	clnt->cl_parent = parent;
	rpc_client_register(clnt, pseudoflavor, NULL);

	dprintk("RPC: failed to switch xprt for clnt %p\n", clnt);
EXPORT_SYMBOL_GPL(rpc_switch_client_transport);
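
/*
 * Illustrative sketch (added commentary, not part of the original file):
 * a caller that has quiesced all RPC traffic could switch transports by
 * building a new struct xprt_create.  The address variables and server
 * name below are hypothetical.
 *
 *	struct xprt_create xprtargs = {
 *		.ident		= XPRT_TRANSPORT_TCP,
 *		.net		= rpc_net_ns(clnt),
 *		.dstaddr	= (struct sockaddr *)&new_addr,
 *		.addrlen	= sizeof(new_addr),
 *		.servername	= "mirror-server",
 *	};
 *	err = rpc_switch_client_transport(clnt, &xprtargs, clnt->cl_timeout);
 */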
/*
 * Kill all tasks for the given client.
 * XXX: kill their descendants as well?
 */
void rpc_killall_tasks(struct rpc_clnt *clnt)
	struct rpc_task *rovr;

	if (list_empty(&clnt->cl_tasks))
	dprintk("RPC: killing all tasks for client %p\n", clnt);
	/*
	 * Spin lock all_tasks to prevent changes...
	 */
	spin_lock(&clnt->cl_lock);
	list_for_each_entry(rovr, &clnt->cl_tasks, tk_task) {
		if (!RPC_IS_ACTIVATED(rovr))
		if (!(rovr->tk_flags & RPC_TASK_KILLED)) {
			rovr->tk_flags |= RPC_TASK_KILLED;
			rpc_exit(rovr, -EIO);
			if (RPC_IS_QUEUED(rovr))
				rpc_wake_up_queued_task(rovr->tk_waitqueue,
	spin_unlock(&clnt->cl_lock);
EXPORT_SYMBOL_GPL(rpc_killall_tasks);

/*
 * Properly shut down an RPC client, terminating all outstanding
 */
void rpc_shutdown_client(struct rpc_clnt *clnt)
	dprintk_rcu("RPC: shutting down %s client for %s\n",
			clnt->cl_program->name,
			rcu_dereference(clnt->cl_xprt)->servername);

	while (!list_empty(&clnt->cl_tasks)) {
		rpc_killall_tasks(clnt);
		wait_event_timeout(destroy_wait,
			list_empty(&clnt->cl_tasks), 1*HZ);

	rpc_release_client(clnt);
EXPORT_SYMBOL_GPL(rpc_shutdown_client);
static struct rpc_clnt *
rpc_free_client(struct rpc_clnt *clnt)
	struct rpc_clnt *parent = NULL;

	dprintk_rcu("RPC: destroying %s client for %s\n",
			clnt->cl_program->name,
			rcu_dereference(clnt->cl_xprt)->servername);
	if (clnt->cl_parent != clnt)
		parent = clnt->cl_parent;
	rpc_clnt_remove_pipedir(clnt);
	rpc_unregister_client(clnt);
	rpc_free_iostats(clnt->cl_metrics);
	clnt->cl_metrics = NULL;
	xprt_put(rcu_dereference_raw(clnt->cl_xprt));

static struct rpc_clnt *
rpc_free_auth(struct rpc_clnt *clnt)
	if (clnt->cl_auth == NULL)
		return rpc_free_client(clnt);

	/*
	 * Note: RPCSEC_GSS may need to send NULL RPC calls in order to
	 * release remaining GSS contexts. This mechanism ensures
	 * that it can do so safely.
	 */
	atomic_inc(&clnt->cl_count);
	rpcauth_release(clnt->cl_auth);
	clnt->cl_auth = NULL;
	if (atomic_dec_and_test(&clnt->cl_count))
		return rpc_free_client(clnt);

/*
 * Release reference to the RPC client
 */
rpc_release_client(struct rpc_clnt *clnt)
	dprintk("RPC: rpc_release_client(%p)\n", clnt);

		if (list_empty(&clnt->cl_tasks))
			wake_up(&destroy_wait);
		if (!atomic_dec_and_test(&clnt->cl_count))
		clnt = rpc_free_auth(clnt);
	} while (clnt != NULL);
EXPORT_SYMBOL_GPL(rpc_release_client);
/**
 * rpc_bind_new_program - bind a new RPC program to an existing client
 * @old: old rpc_client
 * @program: rpc program to set
 * @vers: rpc program version
 *
 * Clones the rpc client and sets up a new RPC program.  This is mainly
 * of use for enabling different RPC programs to share the same transport.
 * The Sun NFSv2/v3 ACL protocol can do this.
 */
struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old,
				      const struct rpc_program *program,
	struct rpc_create_args args = {
		.prognumber	= program->number,
		.authflavor	= old->cl_auth->au_flavor,
	struct rpc_clnt *clnt;

	clnt = __rpc_clone_client(&args, old);
	err = rpc_ping(clnt);
		rpc_shutdown_client(clnt);
EXPORT_SYMBOL_GPL(rpc_bind_new_program);
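
/*
 * Illustrative sketch (added commentary, not part of the original file):
 * the NFS ACL case mentioned above boils down to something like the
 * following, where "acl_program" stands in for the caller's own
 * rpc_program definition (hypothetical name).
 *
 *	struct rpc_clnt *acl_clnt = rpc_bind_new_program(nfs_clnt,
 *							 &acl_program, 3);
 *	if (IS_ERR(acl_clnt))
 *		return PTR_ERR(acl_clnt);
 */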
void rpc_task_release_client(struct rpc_task *task)
	struct rpc_clnt *clnt = task->tk_client;

		/* Remove from client task list */
		spin_lock(&clnt->cl_lock);
		list_del(&task->tk_task);
		spin_unlock(&clnt->cl_lock);
		task->tk_client = NULL;

		rpc_release_client(clnt);

void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt)
	rpc_task_release_client(task);
	task->tk_client = clnt;
	atomic_inc(&clnt->cl_count);
	if (clnt->cl_softrtry)
		task->tk_flags |= RPC_TASK_SOFT;
	if (clnt->cl_noretranstimeo)
		task->tk_flags |= RPC_TASK_NO_RETRANS_TIMEOUT;
	if (sk_memalloc_socks()) {
		struct rpc_xprt *xprt;

		xprt = rcu_dereference(clnt->cl_xprt);
			task->tk_flags |= RPC_TASK_SWAPPER;
	/* Add to the client's list of all tasks */
	spin_lock(&clnt->cl_lock);
	list_add_tail(&task->tk_task, &clnt->cl_tasks);
	spin_unlock(&clnt->cl_lock);

void rpc_task_reset_client(struct rpc_task *task, struct rpc_clnt *clnt)
	rpc_task_release_client(task);
	rpc_task_set_client(task, clnt);
EXPORT_SYMBOL_GPL(rpc_task_reset_client);

rpc_task_set_rpc_message(struct rpc_task *task, const struct rpc_message *msg)
	task->tk_msg.rpc_proc = msg->rpc_proc;
	task->tk_msg.rpc_argp = msg->rpc_argp;
	task->tk_msg.rpc_resp = msg->rpc_resp;
	if (msg->rpc_cred != NULL)
		task->tk_msg.rpc_cred = get_rpccred(msg->rpc_cred);

/*
 * Default callback for async RPC calls
 */
rpc_default_callback(struct rpc_task *task, void *data)

static const struct rpc_call_ops rpc_default_ops = {
	.rpc_call_done = rpc_default_callback,
/**
 * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it
 * @task_setup_data: pointer to task initialisation data
 */
struct rpc_task *rpc_run_task(const struct rpc_task_setup *task_setup_data)
	struct rpc_task *task;

	task = rpc_new_task(task_setup_data);

	rpc_task_set_client(task, task_setup_data->rpc_client);
	rpc_task_set_rpc_message(task, task_setup_data->rpc_message);

	if (task->tk_action == NULL)
		rpc_call_start(task);

	atomic_inc(&task->tk_count);
EXPORT_SYMBOL_GPL(rpc_run_task);
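
/*
 * Illustrative sketch (added commentary, not part of the original file):
 * callers that need the rpc_task handle (for example to wait on it or to
 * attach their own callbacks) build a struct rpc_task_setup themselves.
 * The ops structure and calldata below are hypothetical.
 *
 *	struct rpc_task_setup setup = {
 *		.rpc_client	= clnt,
 *		.rpc_message	= &msg,
 *		.callback_ops	= &example_call_ops,	(hypothetical)
 *		.callback_data	= calldata,
 *		.flags		= RPC_TASK_ASYNC,
 *	};
 *	struct rpc_task *task = rpc_run_task(&setup);
 *
 *	if (IS_ERR(task))
 *		return PTR_ERR(task);
 *	rpc_put_task(task);
 */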
/**
 * rpc_call_sync - Perform a synchronous RPC call
 * @clnt: pointer to RPC client
 * @msg: RPC call parameters
 * @flags: RPC call flags
 */
int rpc_call_sync(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags)
	struct rpc_task *task;
	struct rpc_task_setup task_setup_data = {
		.callback_ops = &rpc_default_ops,

	WARN_ON_ONCE(flags & RPC_TASK_ASYNC);
	if (flags & RPC_TASK_ASYNC) {
		rpc_release_calldata(task_setup_data.callback_ops,
				task_setup_data.callback_data);

	task = rpc_run_task(&task_setup_data);
		return PTR_ERR(task);
	status = task->tk_status;
EXPORT_SYMBOL_GPL(rpc_call_sync);
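
/*
 * Illustrative sketch (added commentary, not part of the original file):
 * a synchronous call passes the procedure, argument and result pointers
 * in a struct rpc_message; the proc table entry below is hypothetical.
 * The call returns once the reply has been decoded (or the task failed),
 * leaving the decoded result in "res".
 *
 *	struct rpc_message msg = {
 *		.rpc_proc	= &example_procedures[EXAMPLEPROC_FOO],
 *		.rpc_argp	= &args,
 *		.rpc_resp	= &res,
 *	};
 *	int status = rpc_call_sync(clnt, &msg, RPC_TASK_SOFT);
 */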
/**
 * rpc_call_async - Perform an asynchronous RPC call
 * @clnt: pointer to RPC client
 * @msg: RPC call parameters
 * @flags: RPC call flags
 * @tk_ops: RPC call ops
 * @data: user call data
 */
rpc_call_async(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags,
	       const struct rpc_call_ops *tk_ops, void *data)
	struct rpc_task *task;
	struct rpc_task_setup task_setup_data = {
		.callback_ops = tk_ops,
		.callback_data = data,
		.flags = flags|RPC_TASK_ASYNC,

	task = rpc_run_task(&task_setup_data);
		return PTR_ERR(task);
EXPORT_SYMBOL_GPL(rpc_call_async);
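
/*
 * Illustrative sketch (added commentary, not part of the original file):
 * asynchronous callers supply a struct rpc_call_ops whose rpc_call_done
 * handler runs when the reply arrives.  The ops and calldata names are
 * hypothetical.
 *
 *	static void example_done(struct rpc_task *task, void *calldata)
 *	{
 *		...inspect task->tk_status and the decoded result...
 *	}
 *
 *	static const struct rpc_call_ops example_call_ops = {
 *		.rpc_call_done	= example_done,
 *	};
 *
 *	err = rpc_call_async(clnt, &msg, RPC_TASK_SOFT,
 *			     &example_call_ops, calldata);
 */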
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
/**
 * rpc_run_bc_task - Allocate a new RPC task for backchannel use, then run
 * rpc_execute against it
 * @tk_ops: RPC call ops
 */
struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req,
				const struct rpc_call_ops *tk_ops)
	struct rpc_task *task;
	struct xdr_buf *xbufp = &req->rq_snd_buf;
	struct rpc_task_setup task_setup_data = {
		.callback_ops = tk_ops,

	dprintk("RPC: rpc_run_bc_task req= %p\n", req);
	/*
	 * Create an rpc_task to send the data
	 */
	task = rpc_new_task(&task_setup_data);
		xprt_free_bc_request(req);

	task->tk_rqstp = req;

	/*
	 * Set up the xdr_buf length.
	 * This also indicates that the buffer is XDR encoded already.
	 */
	xbufp->len = xbufp->head[0].iov_len + xbufp->page_len +
			xbufp->tail[0].iov_len;

	task->tk_action = call_bc_transmit;
	atomic_inc(&task->tk_count);
	WARN_ON_ONCE(atomic_read(&task->tk_count) != 2);

	dprintk("RPC: rpc_run_bc_task: task= %p\n", task);
#endif /* CONFIG_SUNRPC_BACKCHANNEL */

rpc_call_start(struct rpc_task *task)
	task->tk_action = call_start;
EXPORT_SYMBOL_GPL(rpc_call_start);
/**
 * rpc_peeraddr - extract remote peer address from clnt's xprt
 * @clnt: RPC client structure
 * @buf: target buffer
 * @bufsize: length of target buffer
 *
 * Returns the number of bytes that are actually in the stored address.
 */
size_t rpc_peeraddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t bufsize)
	struct rpc_xprt *xprt;

	xprt = rcu_dereference(clnt->cl_xprt);

	bytes = xprt->addrlen;
	if (bytes > bufsize)
	memcpy(buf, &xprt->addr, bytes);
EXPORT_SYMBOL_GPL(rpc_peeraddr);

/**
 * rpc_peeraddr2str - return remote peer address in printable format
 * @clnt: RPC client structure
 * @format: address format
 *
 * NB: the lifetime of the memory referenced by the returned pointer is
 * the same as the rpc_xprt itself.  As long as the caller uses this
 * pointer, it must hold the RCU read lock.
 */
const char *rpc_peeraddr2str(struct rpc_clnt *clnt,
			     enum rpc_display_format_t format)
	struct rpc_xprt *xprt;

	xprt = rcu_dereference(clnt->cl_xprt);

	if (xprt->address_strings[format] != NULL)
		return xprt->address_strings[format];
		return "unprintable";
EXPORT_SYMBOL_GPL(rpc_peeraddr2str);
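
/*
 * Illustrative sketch (added commentary, not part of the original file):
 * because the returned string lives inside the rpc_xprt, callers wrap the
 * use of the pointer in an RCU read-side critical section.
 *
 *	rcu_read_lock();
 *	pr_info("server address: %s\n",
 *		rpc_peeraddr2str(clnt, RPC_DISPLAY_ADDR));
 *	rcu_read_unlock();
 */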
static const struct sockaddr_in rpc_inaddr_loopback = {
	.sin_family		= AF_INET,
	.sin_addr.s_addr	= htonl(INADDR_ANY),

static const struct sockaddr_in6 rpc_in6addr_loopback = {
	.sin6_family		= AF_INET6,
	.sin6_addr		= IN6ADDR_ANY_INIT,

/*
 * Try a getsockname() on a connected datagram socket.  Using a
 * connected datagram socket prevents leaving a socket in TIME_WAIT.
 * This conserves the ephemeral port number space.
 *
 * Returns zero and fills in "buf" if successful; otherwise, a
 * negative errno is returned.
 */
static int rpc_sockname(struct net *net, struct sockaddr *sap, size_t salen,
			struct sockaddr *buf, int buflen)
	struct socket *sock;

	err = __sock_create(net, sap->sa_family,
				SOCK_DGRAM, IPPROTO_UDP, &sock, 1);
		dprintk("RPC: can't create UDP socket (%d)\n", err);

	switch (sap->sa_family) {
		err = kernel_bind(sock,
				(struct sockaddr *)&rpc_inaddr_loopback,
				sizeof(rpc_inaddr_loopback));
		err = kernel_bind(sock,
				(struct sockaddr *)&rpc_in6addr_loopback,
				sizeof(rpc_in6addr_loopback));
		err = -EAFNOSUPPORT;
		dprintk("RPC: can't bind UDP socket (%d)\n", err);

	err = kernel_connect(sock, sap, salen, 0);
		dprintk("RPC: can't connect UDP socket (%d)\n", err);

	err = kernel_getsockname(sock, buf, &buflen);
		dprintk("RPC: getsockname failed (%d)\n", err);

	if (buf->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)buf;
		sin6->sin6_scope_id = 0;

	dprintk("RPC: %s succeeded\n", __func__);
/*
 * Scraping a connected socket failed, so we don't have a useable
 * local address.  Fallback: generate an address that will prevent
 * the server from calling us back.
 *
 * Returns zero and fills in "buf" if successful; otherwise, a
 * negative errno is returned.
 */
static int rpc_anyaddr(int family, struct sockaddr *buf, size_t buflen)
		if (buflen < sizeof(rpc_inaddr_loopback))
		memcpy(buf, &rpc_inaddr_loopback,
				sizeof(rpc_inaddr_loopback));
		if (buflen < sizeof(rpc_in6addr_loopback))
		memcpy(buf, &rpc_in6addr_loopback,
				sizeof(rpc_in6addr_loopback));
		dprintk("RPC: %s: address family not supported\n",
		return -EAFNOSUPPORT;
	dprintk("RPC: %s: succeeded\n", __func__);

/**
 * rpc_localaddr - discover local endpoint address for an RPC client
 * @clnt: RPC client structure
 * @buf: target buffer
 * @buflen: size of target buffer, in bytes
 *
 * Returns zero and fills in "buf" and "buflen" if successful;
 * otherwise, a negative errno is returned.
 *
 * This works even if the underlying transport is not currently connected,
 * or if the upper layer never previously provided a source address.
 *
 * The result of this function call is transient: multiple calls in
 * succession may give different results, depending on how local
 * networking configuration changes over time.
 */
int rpc_localaddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t buflen)
	struct sockaddr_storage address;
	struct sockaddr *sap = (struct sockaddr *)&address;
	struct rpc_xprt *xprt;

	xprt = rcu_dereference(clnt->cl_xprt);
	salen = xprt->addrlen;
	memcpy(sap, &xprt->addr, salen);
	net = get_net(xprt->xprt_net);

	rpc_set_port(sap, 0);
	err = rpc_sockname(net, sap, salen, buf, buflen);
		/* Couldn't discover local address, return ANYADDR */
		return rpc_anyaddr(sap->sa_family, buf, buflen);
EXPORT_SYMBOL_GPL(rpc_localaddr);
rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize)
	struct rpc_xprt *xprt;

	xprt = rcu_dereference(clnt->cl_xprt);
	if (xprt->ops->set_buffer_size)
		xprt->ops->set_buffer_size(xprt, sndsize, rcvsize);
EXPORT_SYMBOL_GPL(rpc_setbufsize);

/**
 * rpc_protocol - Get transport protocol number for an RPC client
 * @clnt: RPC client to query
 */
int rpc_protocol(struct rpc_clnt *clnt)
	protocol = rcu_dereference(clnt->cl_xprt)->prot;
EXPORT_SYMBOL_GPL(rpc_protocol);

/**
 * rpc_net_ns - Get the network namespace for this RPC client
 * @clnt: RPC client to query
 */
struct net *rpc_net_ns(struct rpc_clnt *clnt)
	ret = rcu_dereference(clnt->cl_xprt)->xprt_net;
EXPORT_SYMBOL_GPL(rpc_net_ns);

/**
 * rpc_max_payload - Get maximum payload size for a transport, in bytes
 * @clnt: RPC client to query
 *
 * For stream transports, this is one RPC record fragment (see RFC
 * 1831), as we don't support multi-record requests yet.  For datagram
 * transports, this is the size of an IP packet minus the IP, UDP, and
 */
size_t rpc_max_payload(struct rpc_clnt *clnt)
	ret = rcu_dereference(clnt->cl_xprt)->max_payload;
EXPORT_SYMBOL_GPL(rpc_max_payload);

/**
 * rpc_get_timeout - Get timeout for transport in units of HZ
 * @clnt: RPC client to query
 */
unsigned long rpc_get_timeout(struct rpc_clnt *clnt)
	ret = rcu_dereference(clnt->cl_xprt)->timeout->to_initval;
EXPORT_SYMBOL_GPL(rpc_get_timeout);

/**
 * rpc_force_rebind - force transport to check that remote port is unchanged
 * @clnt: client to rebind
 */
void rpc_force_rebind(struct rpc_clnt *clnt)
	if (clnt->cl_autobind) {
		xprt_clear_bound(rcu_dereference(clnt->cl_xprt));
EXPORT_SYMBOL_GPL(rpc_force_rebind);

/*
 * Restart an (async) RPC call from the call_prepare state.
 * Usually called from within the exit handler.
 */
rpc_restart_call_prepare(struct rpc_task *task)
	if (RPC_ASSASSINATED(task))
	task->tk_action = call_start;
	if (task->tk_ops->rpc_call_prepare != NULL)
		task->tk_action = rpc_prepare_task;
EXPORT_SYMBOL_GPL(rpc_restart_call_prepare);

/*
 * Restart an (async) RPC call.  Usually called from within the
 */
rpc_restart_call(struct rpc_task *task)
	if (RPC_ASSASSINATED(task))
	task->tk_action = call_start;
EXPORT_SYMBOL_GPL(rpc_restart_call);

static const char *rpc_proc_name(const struct rpc_task *task)
	const struct rpc_procinfo *proc = task->tk_msg.rpc_proc;

		return proc->p_name;
/*
 * Other FSM states can be visited zero or more times, but
 * this state is visited exactly once for each RPC.
 */
call_start(struct rpc_task *task)
	struct rpc_clnt *clnt = task->tk_client;

	dprintk("RPC: %5u call_start %s%d proc %s (%s)\n", task->tk_pid,
			clnt->cl_program->name, clnt->cl_vers,
			rpc_proc_name(task),
			(RPC_IS_ASYNC(task) ? "async" : "sync"));

	/* Increment call count */
	task->tk_msg.rpc_proc->p_count++;
	clnt->cl_stats->rpccnt++;
	task->tk_action = call_reserve;

/*
 * 1.	Reserve an RPC call slot
 */
call_reserve(struct rpc_task *task)
	dprint_status(task);

	task->tk_status = 0;
	task->tk_action = call_reserveresult;

static void call_retry_reserve(struct rpc_task *task);
/*
 * 1b.	Grok the result of xprt_reserve()
 */
call_reserveresult(struct rpc_task *task)
	int status = task->tk_status;

	dprint_status(task);

	/*
	 * After a call to xprt_reserve(), we must have either
	 * a request slot or else an error status.
	 */
	task->tk_status = 0;
	if (task->tk_rqstp) {
		task->tk_action = call_refresh;

	printk(KERN_ERR "%s: status=%d, but no request slot, exiting\n",
	rpc_exit(task, -EIO);

	/*
	 * Even though there was an error, we may have acquired
	 * a request slot somehow.  Make sure not to leak it.
	 */
	if (task->tk_rqstp) {
		printk(KERN_ERR "%s: status=%d, request allocated anyway\n",

		rpc_delay(task, HZ >> 2);
	case -EAGAIN:	/* woken up; retry */
		task->tk_action = call_retry_reserve;
	case -EIO:	/* probably a shutdown */
		printk(KERN_ERR "%s: unrecognized error %d, exiting\n",
	rpc_exit(task, status);

/*
 * 1c.	Retry reserving an RPC call slot
 */
call_retry_reserve(struct rpc_task *task)
	dprint_status(task);

	task->tk_status = 0;
	task->tk_action = call_reserveresult;
	xprt_retry_reserve(task);

/*
 * 2.	Bind and/or refresh the credentials
 */
call_refresh(struct rpc_task *task)
	dprint_status(task);

	task->tk_action = call_refreshresult;
	task->tk_status = 0;
	task->tk_client->cl_stats->rpcauthrefresh++;
	rpcauth_refreshcred(task);

/*
 * 2a.	Process the results of a credential refresh
 */
call_refreshresult(struct rpc_task *task)
	int status = task->tk_status;

	dprint_status(task);

	task->tk_status = 0;
	task->tk_action = call_refresh;
	if (rpcauth_uptodatecred(task)) {
		task->tk_action = call_allocate;
	/* Use rate-limiting and a max number of retries if refresh
	 * had status 0 but failed to update the cred.
	 */
		rpc_delay(task, 3*HZ);
	if (!task->tk_cred_retry)
	task->tk_cred_retry--;
	dprintk("RPC: %5u %s: retry refresh creds\n",
			task->tk_pid, __func__);

	dprintk("RPC: %5u %s: refresh creds failed with error %d\n",
			task->tk_pid, __func__, status);
	rpc_exit(task, status);
/*
 * 2b.	Allocate the buffer. For details, see sched.c:rpc_malloc.
 *	(Note: buffer memory is freed in xprt_release).
 */
call_allocate(struct rpc_task *task)
	unsigned int slack = task->tk_rqstp->rq_cred->cr_auth->au_cslack;
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	struct rpc_procinfo *proc = task->tk_msg.rpc_proc;

	dprint_status(task);

	task->tk_status = 0;
	task->tk_action = call_bind;

	if (proc->p_proc != 0) {
		BUG_ON(proc->p_arglen == 0);
		if (proc->p_decode != NULL)
			BUG_ON(proc->p_replen == 0);

	/*
	 * Calculate the size (in quads) of the RPC call
	 * and reply headers, and convert both values
	 */
	req->rq_callsize = RPC_CALLHDRSIZE + (slack << 1) + proc->p_arglen;
	req->rq_callsize <<= 2;
	req->rq_rcvsize = RPC_REPHDRSIZE + slack + proc->p_replen;
	req->rq_rcvsize <<= 2;

	req->rq_buffer = xprt->ops->buf_alloc(task,
					req->rq_callsize + req->rq_rcvsize);
	if (req->rq_buffer != NULL)

	dprintk("RPC: %5u rpc_buffer allocation failed\n", task->tk_pid);

	if (RPC_IS_ASYNC(task) || !fatal_signal_pending(current)) {
		task->tk_action = call_allocate;
		rpc_delay(task, HZ>>4);

	rpc_exit(task, -ERESTARTSYS);
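
/*
 * Illustrative note (added commentary, not in the original source): the
 * buffer sizes above are first computed in XDR quads (32-bit words) --
 * fixed header size, plus the auth slack, plus the procedure's argument
 * or reply length -- and the "<<= 2" then converts quads to bytes.  For
 * example, assuming a call header of 6 quads, an auth slack of 2 quads
 * and a p_arglen of 10 quads, rq_callsize would be 6 + (2 << 1) + 10 = 20
 * quads, i.e. 80 bytes after the shift.  The exact constants are defined
 * in the sunrpc headers and may differ.
 */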
rpc_task_need_encode(struct rpc_task *task)
	return task->tk_rqstp->rq_snd_buf.len == 0;

rpc_task_force_reencode(struct rpc_task *task)
	task->tk_rqstp->rq_snd_buf.len = 0;
	task->tk_rqstp->rq_bytes_sent = 0;

rpc_xdr_buf_init(struct xdr_buf *buf, void *start, size_t len)
	buf->head[0].iov_base = start;
	buf->head[0].iov_len = len;
	buf->tail[0].iov_len = 0;

/*
 * 3.	Encode arguments of an RPC call
 */
rpc_xdr_encode(struct rpc_task *task)
	struct rpc_rqst *req = task->tk_rqstp;

	dprint_status(task);

	rpc_xdr_buf_init(&req->rq_snd_buf,
	rpc_xdr_buf_init(&req->rq_rcv_buf,
			 (char *)req->rq_buffer + req->rq_callsize,

	p = rpc_encode_header(task);
		printk(KERN_INFO "RPC: couldn't encode RPC header, exit EIO\n");
		rpc_exit(task, -EIO);

	encode = task->tk_msg.rpc_proc->p_encode;

	task->tk_status = rpcauth_wrap_req(task, encode, req, p,
			task->tk_msg.rpc_argp);

/*
 * 4.	Get the server port number if not yet set
 */
call_bind(struct rpc_task *task)
	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;

	dprint_status(task);

	task->tk_action = call_connect;
	if (!xprt_bound(xprt)) {
		task->tk_action = call_bind_status;
		task->tk_timeout = xprt->bind_timeout;
		xprt->ops->rpcbind(task);
/*
 * 4a.	Sort out bind result
 */
call_bind_status(struct rpc_task *task)
	if (task->tk_status >= 0) {
		dprint_status(task);
		task->tk_status = 0;
		task->tk_action = call_connect;

	trace_rpc_bind_status(task);
	switch (task->tk_status) {
		dprintk("RPC: %5u rpcbind out of memory\n", task->tk_pid);
		rpc_delay(task, HZ >> 2);
		dprintk("RPC: %5u remote rpcbind: RPC program/version "
				"unavailable\n", task->tk_pid);
		/* fail immediately if this is an RPC ping */
		if (task->tk_msg.rpc_proc->p_proc == 0) {
			status = -EOPNOTSUPP;
		if (task->tk_rebind_retry == 0)
		task->tk_rebind_retry--;
		rpc_delay(task, 3*HZ);
		dprintk("RPC: %5u rpcbind request timed out\n",
		/* server doesn't support any rpcbind version we know of */
		dprintk("RPC: %5u unrecognized remote rpcbind service\n",
	case -EPROTONOSUPPORT:
		dprintk("RPC: %5u remote rpcbind version unavailable, retrying\n",
		task->tk_status = 0;
		task->tk_action = call_bind;
	case -ECONNREFUSED:		/* connection problems */
		dprintk("RPC: %5u remote rpcbind unreachable: %d\n",
				task->tk_pid, task->tk_status);
		if (!RPC_IS_SOFTCONN(task)) {
			rpc_delay(task, 5*HZ);
		status = task->tk_status;
		dprintk("RPC: %5u unrecognized rpcbind error (%d)\n",
				task->tk_pid, -task->tk_status);

	rpc_exit(task, status);

	task->tk_action = call_timeout;

/*
 * 4b.	Connect to the RPC server
 */
call_connect(struct rpc_task *task)
	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;

	dprintk("RPC: %5u call_connect xprt %p %s connected\n",
			(xprt_connected(xprt) ? "is" : "is not"));

	task->tk_action = call_transmit;
	if (!xprt_connected(xprt)) {
		task->tk_action = call_connect_status;
		if (task->tk_status < 0)
		if (task->tk_flags & RPC_TASK_NOCONNECT) {
			rpc_exit(task, -ENOTCONN);

/*
 * 4c.	Sort out connect result
 */
call_connect_status(struct rpc_task *task)
	struct rpc_clnt *clnt = task->tk_client;
	int status = task->tk_status;

	dprint_status(task);

	trace_rpc_connect_status(task, status);
	task->tk_status = 0;
		/* if soft mounted, test if we've timed out */
			task->tk_action = call_timeout;
		/* retry with existing socket, after a delay */
		rpc_delay(task, 3*HZ);
		if (RPC_IS_SOFTCONN(task))
		task->tk_action = call_bind;
		clnt->cl_stats->netreconn++;
		task->tk_action = call_transmit;
	rpc_exit(task, status);
/*
 * 5.	Transmit the RPC request, and wait for reply
 */
call_transmit(struct rpc_task *task)
	int is_retrans = RPC_WAS_SENT(task);

	dprint_status(task);

	task->tk_action = call_status;
	if (task->tk_status < 0)
	if (!xprt_prepare_transmit(task))
	task->tk_action = call_transmit_status;
	/* Encode here so that rpcsec_gss can use correct sequence number. */
	if (rpc_task_need_encode(task)) {
		rpc_xdr_encode(task);
		/* Did the encode result in an error condition? */
		if (task->tk_status != 0) {
			/* Was the error nonfatal? */
			if (task->tk_status == -EAGAIN)
				rpc_delay(task, HZ >> 4);
				rpc_exit(task, task->tk_status);

	xprt_transmit(task);
	if (task->tk_status < 0)
		task->tk_client->cl_stats->rpcretrans++;
	/*
	 * On success, ensure that we call xprt_end_transmit() before sleeping
	 * in order to allow access to the socket to other RPC requests.
	 */
	call_transmit_status(task);
	if (rpc_reply_expected(task))
	task->tk_action = rpc_exit_task;
	rpc_wake_up_queued_task(&task->tk_rqstp->rq_xprt->pending, task);

/*
 * 5a.	Handle cleanup after a transmission
 */
call_transmit_status(struct rpc_task *task)
	task->tk_action = call_status;

	/*
	 * Common case: success.  Force the compiler to put this
	 */
	if (task->tk_status == 0) {
		xprt_end_transmit(task);
		rpc_task_force_reencode(task);

	switch (task->tk_status) {
		dprint_status(task);
		xprt_end_transmit(task);
		rpc_task_force_reencode(task);
		/*
		 * Special cases: if we've been waiting on the
		 * socket's write_space() callback, or if the
		 * socket just returned a connection error,
		 * then hold onto the transport lock.
		 */
		if (RPC_IS_SOFTCONN(task)) {
			xprt_end_transmit(task);
			rpc_exit(task, task->tk_status);
		rpc_task_force_reencode(task);
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
/*
 * 5b.	Send the backchannel RPC reply.  On error, drop the reply.  In
 *	addition, disconnect on connectivity errors.
 */
call_bc_transmit(struct rpc_task *task)
	struct rpc_rqst *req = task->tk_rqstp;

	if (!xprt_prepare_transmit(task)) {
		/*
		 * Could not reserve the transport. Try again after the
		 * transport is released.
		 */
		task->tk_status = 0;
		task->tk_action = call_bc_transmit;

	task->tk_action = rpc_exit_task;
	if (task->tk_status < 0) {
		printk(KERN_NOTICE "RPC: Could not send backchannel reply "
			"error: %d\n", task->tk_status);

	xprt_transmit(task);
	xprt_end_transmit(task);
	dprint_status(task);
	switch (task->tk_status) {
		/*
		 * Problem reaching the server.  Disconnect and let the
		 * forechannel reestablish the connection.  The server will
		 * have to retransmit the backchannel request and we'll
		 * reprocess it.  Since these ops are idempotent, there's no
		 * need to cache our reply at this time.
		 */
		printk(KERN_NOTICE "RPC: Could not send backchannel reply "
			"error: %d\n", task->tk_status);
		xprt_conditional_disconnect(req->rq_xprt,
			req->rq_connect_cookie);
		/*
		 * We were unable to reply and will have to drop the
		 * request.  The server should reconnect and retransmit.
		 */
		WARN_ON_ONCE(task->tk_status == -EAGAIN);
		printk(KERN_NOTICE "RPC: Could not send backchannel reply "
			"error: %d\n", task->tk_status);
	rpc_wake_up_queued_task(&req->rq_xprt->pending, task);
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
/*
 * 6.	Sort out the RPC call status
 */
call_status(struct rpc_task *task)
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rqst *req = task->tk_rqstp;

	if (req->rq_reply_bytes_recvd > 0 && !req->rq_bytes_sent)
		task->tk_status = req->rq_reply_bytes_recvd;

	dprint_status(task);

	status = task->tk_status;
		task->tk_action = call_decode;

	trace_rpc_call_status(task);
	task->tk_status = 0;
		/*
		 * Delay any retries for 3 seconds, then handle as if it
		 */
		rpc_delay(task, 3*HZ);
		task->tk_action = call_timeout;
		if (!(task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT)
		    && task->tk_client->cl_discrtry)
			xprt_conditional_disconnect(req->rq_xprt,
					req->rq_connect_cookie);
		rpc_force_rebind(clnt);
		rpc_delay(task, 3*HZ);
		task->tk_action = call_bind;
		task->tk_action = call_transmit;
		/* shutdown or soft timeout */
		rpc_exit(task, status);
		if (clnt->cl_chatty)
			printk("%s: RPC call returned error %d\n",
			       clnt->cl_program->name, -status);
		rpc_exit(task, status);

/*
 * 6a.	Handle RPC timeout
 *	We do not release the request slot, so we keep using the
 *	same XID for all retransmits.
 */
call_timeout(struct rpc_task *task)
	struct rpc_clnt *clnt = task->tk_client;

	if (xprt_adjust_timeout(task->tk_rqstp) == 0) {
		dprintk("RPC: %5u call_timeout (minor)\n", task->tk_pid);

	dprintk("RPC: %5u call_timeout (major)\n", task->tk_pid);
	task->tk_timeouts++;

	if (RPC_IS_SOFTCONN(task)) {
		rpc_exit(task, -ETIMEDOUT);
	if (RPC_IS_SOFT(task)) {
		if (clnt->cl_chatty) {
			printk(KERN_NOTICE "%s: server %s not responding, timed out\n",
				clnt->cl_program->name,
				rcu_dereference(clnt->cl_xprt)->servername);
		if (task->tk_flags & RPC_TASK_TIMEOUT)
			rpc_exit(task, -ETIMEDOUT);
			rpc_exit(task, -EIO);

	if (!(task->tk_flags & RPC_CALL_MAJORSEEN)) {
		task->tk_flags |= RPC_CALL_MAJORSEEN;
		if (clnt->cl_chatty) {
			printk(KERN_NOTICE "%s: server %s not responding, still trying\n",
				clnt->cl_program->name,
				rcu_dereference(clnt->cl_xprt)->servername);
	rpc_force_rebind(clnt);
	/*
	 * Did our request time out due to an RPCSEC_GSS out-of-sequence
	 * event? RFC2203 requires the server to drop all such requests.
	 */
	rpcauth_invalcred(task);

	task->tk_action = call_bind;
	task->tk_status = 0;
/*
 * 7.	Decode the RPC reply
 */
call_decode(struct rpc_task *task)
	struct rpc_clnt	*clnt = task->tk_client;
	struct rpc_rqst	*req = task->tk_rqstp;
	kxdrdproc_t	decode = task->tk_msg.rpc_proc->p_decode;

	dprint_status(task);

	if (task->tk_flags & RPC_CALL_MAJORSEEN) {
		if (clnt->cl_chatty) {
			printk(KERN_NOTICE "%s: server %s OK\n",
				clnt->cl_program->name,
				rcu_dereference(clnt->cl_xprt)->servername);
		task->tk_flags &= ~RPC_CALL_MAJORSEEN;

	/*
	 * Ensure that we see all writes made by xprt_complete_rqst()
	 * before it changed req->rq_reply_bytes_recvd.
	 */
	req->rq_rcv_buf.len = req->rq_private_buf.len;

	/* Check that the softirq receive buffer is valid */
	WARN_ON(memcmp(&req->rq_rcv_buf, &req->rq_private_buf,
				sizeof(req->rq_rcv_buf)) != 0);

	if (req->rq_rcv_buf.len < 12) {
		if (!RPC_IS_SOFT(task)) {
			task->tk_action = call_bind;
		dprintk("RPC: %s: too small RPC reply size (%d bytes)\n",
				clnt->cl_program->name, task->tk_status);
		task->tk_action = call_timeout;

	p = rpc_verify_header(task);
		if (p == ERR_PTR(-EAGAIN))

	task->tk_action = rpc_exit_task;

	task->tk_status = rpcauth_unwrap_resp(task, decode, req, p,
					      task->tk_msg.rpc_resp);

	dprintk("RPC: %5u call_decode result %d\n", task->tk_pid,

	task->tk_status = 0;
	/* Note: rpc_verify_header() may have freed the RPC slot */
	if (task->tk_rqstp == req) {
		req->rq_reply_bytes_recvd = req->rq_rcv_buf.len = 0;
		if (task->tk_client->cl_discrtry)
			xprt_conditional_disconnect(req->rq_xprt,
					req->rq_connect_cookie);
rpc_encode_header(struct rpc_task *task)
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rqst	*req = task->tk_rqstp;
	__be32		*p = req->rq_svec[0].iov_base;

	/* FIXME: check buffer size? */

	p = xprt_skip_transport_header(req->rq_xprt, p);
	*p++ = req->rq_xid;		/* XID */
	*p++ = htonl(RPC_CALL);		/* CALL */
	*p++ = htonl(RPC_VERSION);	/* RPC version */
	*p++ = htonl(clnt->cl_prog);	/* program number */
	*p++ = htonl(clnt->cl_vers);	/* program version */
	*p++ = htonl(task->tk_msg.rpc_proc->p_proc);	/* procedure */
	p = rpcauth_marshcred(task, p);
	req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p);
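
/*
 * Added commentary (not in the original source): the words marshalled
 * above follow the RPC call message layout of RFC 5531 -- XID, message
 * direction (CALL), RPC protocol version 2, program number, program
 * version and procedure number -- after which rpcauth_marshcred()
 * appends the credential and verifier for the client's auth flavor.
 */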
rpc_verify_header(struct rpc_task *task)
	struct rpc_clnt *clnt = task->tk_client;
	struct kvec *iov = &task->tk_rqstp->rq_rcv_buf.head[0];
	int len = task->tk_rqstp->rq_rcv_buf.len >> 2;
	__be32	*p = iov->iov_base;
	int error = -EACCES;

	if ((task->tk_rqstp->rq_rcv_buf.len & 3) != 0) {
		/* RFC-1014 says that the representation of XDR data must be a
		 * multiple of four bytes
		 * - if it isn't pointer subtraction in the NFS client may give
		 */
		dprintk("RPC: %5u %s: XDR representation not a multiple of"
		       " 4 bytes: 0x%x\n", task->tk_pid, __func__,
		       task->tk_rqstp->rq_rcv_buf.len);

	p += 1; /* skip XID */
	if ((n = ntohl(*p++)) != RPC_REPLY) {
		dprintk("RPC: %5u %s: not an RPC reply: %x\n",
				task->tk_pid, __func__, n);

	if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) {
		switch ((n = ntohl(*p++))) {
		case RPC_AUTH_ERROR:
			dprintk("RPC: %5u %s: RPC call version mismatch!\n",
					task->tk_pid, __func__);
			error = -EPROTONOSUPPORT;
			dprintk("RPC: %5u %s: RPC call rejected, "
					"unknown error: %x\n",
					task->tk_pid, __func__, n);

		switch ((n = ntohl(*p++))) {
		case RPC_AUTH_REJECTEDCRED:
		case RPC_AUTH_REJECTEDVERF:
		case RPCSEC_GSS_CREDPROBLEM:
		case RPCSEC_GSS_CTXPROBLEM:
			if (!task->tk_cred_retry)
			task->tk_cred_retry--;
			dprintk("RPC: %5u %s: retry stale creds\n",
					task->tk_pid, __func__);
			rpcauth_invalcred(task);
			/* Ensure we obtain a new XID! */
			task->tk_action = call_reserve;
		case RPC_AUTH_BADCRED:
		case RPC_AUTH_BADVERF:
			/* possibly garbled cred/verf? */
			if (!task->tk_garb_retry)
			task->tk_garb_retry--;
			dprintk("RPC: %5u %s: retry garbled creds\n",
					task->tk_pid, __func__);
			task->tk_action = call_bind;
		case RPC_AUTH_TOOWEAK:
			printk(KERN_NOTICE "RPC: server %s requires stronger "
			       "authentication.\n",
			       rcu_dereference(clnt->cl_xprt)->servername);
			dprintk("RPC: %5u %s: unknown auth error: %x\n",
					task->tk_pid, __func__, n);
		dprintk("RPC: %5u %s: call rejected %d\n",
				task->tk_pid, __func__, n);

	p = rpcauth_checkverf(task, p);
		dprintk("RPC: %5u %s: auth check failed with %d\n",
				task->tk_pid, __func__, error);
		goto out_garbage;		/* bad verifier, retry */

	len = p - (__be32 *)iov->iov_base - 1;
	switch ((n = ntohl(*p++))) {
	case RPC_PROG_UNAVAIL:
		dprintk_rcu("RPC: %5u %s: program %u is unsupported "
				"by server %s\n", task->tk_pid, __func__,
				(unsigned int)clnt->cl_prog,
				rcu_dereference(clnt->cl_xprt)->servername);
		error = -EPFNOSUPPORT;
	case RPC_PROG_MISMATCH:
		dprintk_rcu("RPC: %5u %s: program %u, version %u unsupported "
				"by server %s\n", task->tk_pid, __func__,
				(unsigned int)clnt->cl_prog,
				(unsigned int)clnt->cl_vers,
				rcu_dereference(clnt->cl_xprt)->servername);
		error = -EPROTONOSUPPORT;
	case RPC_PROC_UNAVAIL:
		dprintk_rcu("RPC: %5u %s: proc %s unsupported by program %u, "
				"version %u on server %s\n",
				task->tk_pid, __func__,
				rpc_proc_name(task),
				clnt->cl_prog, clnt->cl_vers,
				rcu_dereference(clnt->cl_xprt)->servername);
		error = -EOPNOTSUPP;
	case RPC_GARBAGE_ARGS:
		dprintk("RPC: %5u %s: server saw garbage\n",
				task->tk_pid, __func__);
		dprintk("RPC: %5u %s: server accept status: %x\n",
				task->tk_pid, __func__, n);

	clnt->cl_stats->rpcgarbage++;
	if (task->tk_garb_retry) {
		task->tk_garb_retry--;
		dprintk("RPC: %5u %s: retrying\n",
				task->tk_pid, __func__);
		task->tk_action = call_bind;
	return ERR_PTR(-EAGAIN);

	rpc_exit(task, error);
	dprintk("RPC: %5u %s: call failed with error %d\n", task->tk_pid,
	return ERR_PTR(error);

	dprintk("RPC: %5u %s: server reply was truncated.\n", task->tk_pid,
static void rpcproc_encode_null(void *rqstp, struct xdr_stream *xdr, void *obj)

static int rpcproc_decode_null(void *rqstp, struct xdr_stream *xdr, void *obj)

static struct rpc_procinfo rpcproc_null = {
	.p_encode = rpcproc_encode_null,
	.p_decode = rpcproc_decode_null,

static int rpc_ping(struct rpc_clnt *clnt)
	struct rpc_message msg = {
		.rpc_proc = &rpcproc_null,

	msg.rpc_cred = authnull_ops.lookup_cred(NULL, NULL, 0);
	err = rpc_call_sync(clnt, &msg, RPC_TASK_SOFT | RPC_TASK_SOFTCONN);
	put_rpccred(msg.rpc_cred);
struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred, int flags)
	struct rpc_message msg = {
		.rpc_proc = &rpcproc_null,
	struct rpc_task_setup task_setup_data = {
		.rpc_message = &msg,
		.callback_ops = &rpc_default_ops,

	return rpc_run_task(&task_setup_data);
EXPORT_SYMBOL_GPL(rpc_call_null);
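
/*
 * Illustrative sketch (added commentary, not part of the original file):
 * a caller already holding a credential reference could issue an
 * asynchronous NULL ping and drop its task reference immediately.
 *
 *	task = rpc_call_null(clnt, cred, RPC_TASK_SOFT | RPC_TASK_ASYNC);
 *	if (!IS_ERR(task))
 *		rpc_put_task(task);
 */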
static void rpc_show_header(void)
	printk(KERN_INFO "-pid- flgs status -client- --rqstp- "
		"-timeout ---ops--\n");

static void rpc_show_task(const struct rpc_clnt *clnt,
			  const struct rpc_task *task)
	const char *rpc_waitq = "none";

	if (RPC_IS_QUEUED(task))
		rpc_waitq = rpc_qname(task->tk_waitqueue);

	printk(KERN_INFO "%5u %04x %6d %8p %8p %8ld %8p %sv%u %s a:%ps q:%s\n",
		task->tk_pid, task->tk_flags, task->tk_status,
		clnt, task->tk_rqstp, task->tk_timeout, task->tk_ops,
		clnt->cl_program->name, clnt->cl_vers, rpc_proc_name(task),
		task->tk_action, rpc_waitq);

void rpc_show_tasks(struct net *net)
	struct rpc_clnt *clnt;
	struct rpc_task *task;
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

	spin_lock(&sn->rpc_client_lock);
	list_for_each_entry(clnt, &sn->all_clients, cl_clients) {
		spin_lock(&clnt->cl_lock);
		list_for_each_entry(task, &clnt->cl_tasks, tk_task) {
			rpc_show_task(clnt, task);
		spin_unlock(&clnt->cl_lock);
	spin_unlock(&sn->rpc_client_lock);