/*
 * linux/net/sunrpc/clnt.c
 *
 * This file contains the high-level RPC interface.
 * It is modeled as a finite state machine to support both synchronous
 * and asynchronous requests.
 *
 * -	RPC header generation and argument serialization.
 * -	Credential refresh.
 * -	TCP connect handling.
 * -	Retry of operation when it is suspected the operation failed because
 *	of uid squashing on the server, or when the credentials were stale
 *	and need to be refreshed, or when a packet was damaged in transit.
 *	This may have to be moved to the VFS layer.
 *
 * Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com>
 * Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kallsyms.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/utsname.h>
#include <linux/workqueue.h>
#include <linux/in6.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include <linux/sunrpc/metrics.h>
#include <linux/sunrpc/bc_xprt.h>
#include <trace/events/sunrpc.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_CALL
#endif

#define dprint_status(t)					\
	dprintk("RPC: %5u %s (status %d)\n", t->tk_pid,		\
			__func__, t->tk_status)
/*
 * All RPC clients are linked into this list
 */

static DECLARE_WAIT_QUEUE_HEAD(destroy_wait);

static void	call_start(struct rpc_task *task);
static void	call_reserve(struct rpc_task *task);
static void	call_reserveresult(struct rpc_task *task);
static void	call_allocate(struct rpc_task *task);
static void	call_encode(struct rpc_task *task);
static void	call_decode(struct rpc_task *task);
static void	call_bind(struct rpc_task *task);
static void	call_bind_status(struct rpc_task *task);
static void	call_transmit(struct rpc_task *task);
static void	call_status(struct rpc_task *task);
static void	call_transmit_status(struct rpc_task *task);
static void	call_refresh(struct rpc_task *task);
static void	call_refreshresult(struct rpc_task *task);
static void	call_connect(struct rpc_task *task);
static void	call_connect_status(struct rpc_task *task);

static int	rpc_encode_header(struct rpc_task *task,
				  struct xdr_stream *xdr);
static int	rpc_decode_header(struct rpc_task *task,
				  struct xdr_stream *xdr);
static int	rpc_ping(struct rpc_clnt *clnt);
static void	rpc_check_timeout(struct rpc_task *task);
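/*
 * Informative note (not in the original file): the call state machine is
 * inferred from the tk_action assignments in the routines below.  A typical
 * successful call proceeds call_start -> call_reserve -> call_reserveresult
 * -> call_refresh -> call_refreshresult -> call_allocate -> call_encode ->
 * call_bind -> call_bind_status -> call_connect -> call_connect_status ->
 * call_transmit -> call_transmit_status -> call_status -> call_decode,
 * with error paths looping back to earlier states (re-bind, re-connect,
 * re-encode) as handled in the individual routines.
 */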
static void rpc_register_client(struct rpc_clnt *clnt)
{
	struct net *net = rpc_net_ns(clnt);
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

	spin_lock(&sn->rpc_client_lock);
	list_add(&clnt->cl_clients, &sn->all_clients);
	spin_unlock(&sn->rpc_client_lock);
}

static void rpc_unregister_client(struct rpc_clnt *clnt)
{
	struct net *net = rpc_net_ns(clnt);
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

	spin_lock(&sn->rpc_client_lock);
	list_del(&clnt->cl_clients);
	spin_unlock(&sn->rpc_client_lock);
}

static void __rpc_clnt_remove_pipedir(struct rpc_clnt *clnt)
{
	rpc_remove_client_dir(clnt);
}

static void rpc_clnt_remove_pipedir(struct rpc_clnt *clnt)
{
	struct net *net = rpc_net_ns(clnt);
	struct super_block *pipefs_sb;

	pipefs_sb = rpc_get_sb_net(net);
	__rpc_clnt_remove_pipedir(clnt);
}
static struct dentry *rpc_setup_pipedir_sb(struct super_block *sb,
					   struct rpc_clnt *clnt)
{
	static uint32_t clntid;
	const char *dir_name = clnt->cl_program->pipe_dir_name;
	struct dentry *dir, *dentry;

	dir = rpc_d_lookup_sb(sb, dir_name);
	pr_info("RPC: pipefs directory doesn't exist: %s\n", dir_name);
	snprintf(name, sizeof(name), "clnt%x", (unsigned int)clntid++);
	name[sizeof(name) - 1] = '\0';
	dentry = rpc_create_client_dir(dir, name, clnt);
	if (dentry == ERR_PTR(-EEXIST))
		printk(KERN_INFO "RPC: Couldn't create pipefs entry"
				" %s/%s, error %ld\n",
				dir_name, name, PTR_ERR(dentry));
}

static int
rpc_setup_pipedir(struct super_block *pipefs_sb, struct rpc_clnt *clnt)
{
	struct dentry *dentry;

	if (clnt->cl_program->pipe_dir_name != NULL) {
		dentry = rpc_setup_pipedir_sb(pipefs_sb, clnt);
		return PTR_ERR(dentry);
	}
}
static int rpc_clnt_skip_event(struct rpc_clnt *clnt, unsigned long event)
{
	if (clnt->cl_program->pipe_dir_name == NULL)
	switch (event) {
	case RPC_PIPEFS_MOUNT:
		if (clnt->cl_pipedir_objects.pdh_dentry != NULL)
		if (atomic_read(&clnt->cl_count) == 0)
	case RPC_PIPEFS_UMOUNT:
		if (clnt->cl_pipedir_objects.pdh_dentry == NULL)
	}
}

static int __rpc_clnt_handle_event(struct rpc_clnt *clnt, unsigned long event,
				   struct super_block *sb)
{
	struct dentry *dentry;

	switch (event) {
	case RPC_PIPEFS_MOUNT:
		dentry = rpc_setup_pipedir_sb(sb, clnt);
			return PTR_ERR(dentry);
	case RPC_PIPEFS_UMOUNT:
		__rpc_clnt_remove_pipedir(clnt);
	default:
		printk(KERN_ERR "%s: unknown event: %ld\n", __func__, event);
	}
}
static int __rpc_pipefs_event(struct rpc_clnt *clnt, unsigned long event,
			      struct super_block *sb)
{
	for (;; clnt = clnt->cl_parent) {
		if (!rpc_clnt_skip_event(clnt, event))
			error = __rpc_clnt_handle_event(clnt, event, sb);
		if (error || clnt == clnt->cl_parent)
	}
}

static struct rpc_clnt *rpc_get_client_for_event(struct net *net, int event)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	struct rpc_clnt *clnt;

	spin_lock(&sn->rpc_client_lock);
	list_for_each_entry(clnt, &sn->all_clients, cl_clients) {
		if (rpc_clnt_skip_event(clnt, event))
		spin_unlock(&sn->rpc_client_lock);
	}
	spin_unlock(&sn->rpc_client_lock);
}

static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event,
			    void *ptr)
{
	struct super_block *sb = ptr;
	struct rpc_clnt *clnt;

	while ((clnt = rpc_get_client_for_event(sb->s_fs_info, event))) {
		error = __rpc_pipefs_event(clnt, event, sb);
	}
}

static struct notifier_block rpc_clients_block = {
	.notifier_call	= rpc_pipefs_event,
	.priority	= SUNRPC_PIPEFS_RPC_PRIO,
};
int rpc_clients_notifier_register(void)
{
	return rpc_pipefs_notifier_register(&rpc_clients_block);
}

void rpc_clients_notifier_unregister(void)
{
	return rpc_pipefs_notifier_unregister(&rpc_clients_block);
}

static struct rpc_xprt *rpc_clnt_set_transport(struct rpc_clnt *clnt,
		struct rpc_xprt *xprt,
		const struct rpc_timeout *timeout)
{
	struct rpc_xprt *old;

	spin_lock(&clnt->cl_lock);
	old = rcu_dereference_protected(clnt->cl_xprt,
			lockdep_is_held(&clnt->cl_lock));

	if (!xprt_bound(xprt))
		clnt->cl_autobind = 1;

	clnt->cl_timeout = timeout;
	rcu_assign_pointer(clnt->cl_xprt, xprt);
	spin_unlock(&clnt->cl_lock);
}
static void rpc_clnt_set_nodename(struct rpc_clnt *clnt, const char *nodename)
{
	clnt->cl_nodelen = strlcpy(clnt->cl_nodename,
			nodename, sizeof(clnt->cl_nodename));
}

static int rpc_client_register(struct rpc_clnt *clnt,
			       rpc_authflavor_t pseudoflavor,
			       const char *client_name)
{
	struct rpc_auth_create_args auth_args = {
		.pseudoflavor = pseudoflavor,
		.target_name = client_name,
	};
	struct rpc_auth *auth;
	struct net *net = rpc_net_ns(clnt);
	struct super_block *pipefs_sb;

	rpc_clnt_debugfs_register(clnt);

	pipefs_sb = rpc_get_sb_net(net);
	err = rpc_setup_pipedir(pipefs_sb, clnt);
	rpc_register_client(clnt);
	auth = rpcauth_create(&auth_args, clnt);
	dprintk("RPC: Couldn't create auth handle (flavor %u)\n",
			pseudoflavor);
	pipefs_sb = rpc_get_sb_net(net);
	rpc_unregister_client(clnt);
	__rpc_clnt_remove_pipedir(clnt);
	rpc_clnt_debugfs_unregister(clnt);
}
static DEFINE_IDA(rpc_clids);

void rpc_cleanup_clids(void)
{
	ida_destroy(&rpc_clids);
}

static int rpc_alloc_clid(struct rpc_clnt *clnt)
{
	clid = ida_simple_get(&rpc_clids, 0, 0, GFP_KERNEL);
	clnt->cl_clid = clid;
}

static void rpc_free_clid(struct rpc_clnt *clnt)
{
	ida_simple_remove(&rpc_clids, clnt->cl_clid);
}
static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args,
		struct rpc_xprt_switch *xps,
		struct rpc_xprt *xprt,
		struct rpc_clnt *parent)
{
	const struct rpc_program *program = args->program;
	const struct rpc_version *version;
	struct rpc_clnt *clnt = NULL;
	const struct rpc_timeout *timeout;
	const char *nodename = args->nodename;

	/* sanity check the name before trying to print it */
	dprintk("RPC: creating %s client for %s (xprt %p)\n",
			program->name, args->servername, xprt);

	if (args->version >= program->nrvers)
	version = program->version[args->version];

	clnt = kzalloc(sizeof(*clnt), GFP_KERNEL);
	clnt->cl_parent = parent ? : clnt;

	err = rpc_alloc_clid(clnt);

	clnt->cl_cred	  = get_cred(args->cred);
	clnt->cl_procinfo = version->procs;
	clnt->cl_maxproc  = version->nrprocs;
	clnt->cl_prog     = args->prognumber ? : program->number;
	clnt->cl_vers     = version->number;
	clnt->cl_stats    = program->stats;
	clnt->cl_metrics  = rpc_alloc_iostats(clnt);
	rpc_init_pipe_dir_head(&clnt->cl_pipedir_objects);
	if (clnt->cl_metrics == NULL)
	clnt->cl_program  = program;
	INIT_LIST_HEAD(&clnt->cl_tasks);
	spin_lock_init(&clnt->cl_lock);

	timeout = xprt->timeout;
	if (args->timeout != NULL) {
		memcpy(&clnt->cl_timeout_default, args->timeout,
				sizeof(clnt->cl_timeout_default));
		timeout = &clnt->cl_timeout_default;
	}

	rpc_clnt_set_transport(clnt, xprt, timeout);
	xprt_iter_init(&clnt->cl_xpi, xps);
	xprt_switch_put(xps);

	clnt->cl_rtt = &clnt->cl_rtt_default;
	rpc_init_rtt(&clnt->cl_rtt_default, clnt->cl_timeout->to_initval);

	atomic_set(&clnt->cl_count, 1);

	if (nodename == NULL)
		nodename = utsname()->nodename;
	/* save the nodename */
	rpc_clnt_set_nodename(clnt, nodename);

	err = rpc_client_register(clnt, args->authflavor, args->client_name);
	atomic_inc(&parent->cl_count);

	rpc_free_iostats(clnt->cl_metrics);
	put_cred(clnt->cl_cred);
	xprt_switch_put(xps);
}
static struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
		struct rpc_xprt *xprt)
{
	struct rpc_clnt *clnt = NULL;
	struct rpc_xprt_switch *xps;

	if (args->bc_xprt && args->bc_xprt->xpt_bc_xps) {
		WARN_ON_ONCE(!(args->protocol & XPRT_TRANSPORT_BC));
		xps = args->bc_xprt->xpt_bc_xps;
		xprt_switch_get(xps);
		xps = xprt_switch_alloc(xprt, GFP_KERNEL);
			return ERR_PTR(-ENOMEM);
		xprt_switch_get(xps);
		xprt->bc_xprt->xpt_bc_xps = xps;
	}
	clnt = rpc_new_client(args, xps, xprt, NULL);

	if (!(args->flags & RPC_CLNT_CREATE_NOPING)) {
		int err = rpc_ping(clnt);
		rpc_shutdown_client(clnt);
	}

	clnt->cl_softrtry = 1;
	if (args->flags & (RPC_CLNT_CREATE_HARDRTRY|RPC_CLNT_CREATE_SOFTERR)) {
		clnt->cl_softrtry = 0;
		if (args->flags & RPC_CLNT_CREATE_SOFTERR)
			clnt->cl_softerr = 1;
	}

	if (args->flags & RPC_CLNT_CREATE_AUTOBIND)
		clnt->cl_autobind = 1;
	if (args->flags & RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT)
		clnt->cl_noretranstimeo = 1;
	if (args->flags & RPC_CLNT_CREATE_DISCRTRY)
		clnt->cl_discrtry = 1;
	if (!(args->flags & RPC_CLNT_CREATE_QUIET))
}
/**
 * rpc_create - create an RPC client and transport with one call
 * @args: rpc_clnt create argument structure
 *
 * Creates and initializes an RPC transport and an RPC client.
 *
 * It can ping the server in order to determine if it is up, and to see if
 * it supports this program and version.  RPC_CLNT_CREATE_NOPING disables
 * this behavior so asynchronous tasks can also use rpc_create.
 */
struct rpc_clnt *rpc_create(struct rpc_create_args *args)
{
	struct rpc_xprt *xprt;
	struct xprt_create xprtargs = {
		.ident = args->protocol,
		.srcaddr = args->saddress,
		.dstaddr = args->address,
		.addrlen = args->addrsize,
		.servername = args->servername,
		.bc_xprt = args->bc_xprt,
	};

	WARN_ON_ONCE(!(args->protocol & XPRT_TRANSPORT_BC));
	xprt = args->bc_xprt->xpt_bc_xprt;
	return rpc_create_xprt(args, xprt);

	if (args->flags & RPC_CLNT_CREATE_INFINITE_SLOTS)
		xprtargs.flags |= XPRT_CREATE_INFINITE_SLOTS;
	if (args->flags & RPC_CLNT_CREATE_NO_IDLE_TIMEOUT)
		xprtargs.flags |= XPRT_CREATE_NO_IDLE_TIMEOUT;
	/*
	 * If the caller chooses not to specify a hostname, whip
	 * up a string representation of the passed-in address.
	 */
	if (xprtargs.servername == NULL) {
		struct sockaddr_un *sun =
				(struct sockaddr_un *)args->address;
		struct sockaddr_in *sin =
				(struct sockaddr_in *)args->address;
		struct sockaddr_in6 *sin6 =
				(struct sockaddr_in6 *)args->address;

		servername[0] = '\0';
		switch (args->address->sa_family) {
			snprintf(servername, sizeof(servername), "%s",
					sun->sun_path);
			snprintf(servername, sizeof(servername), "%pI4",
					&sin->sin_addr.s_addr);
			snprintf(servername, sizeof(servername), "%pI6",
					&sin6->sin6_addr);
			/* caller wants default server name, but
			 * address family isn't recognized. */
			return ERR_PTR(-EINVAL);
		}
		xprtargs.servername = servername;
	}

	xprt = xprt_create_transport(&xprtargs);
		return (struct rpc_clnt *)xprt;

	/*
	 * By default, kernel RPC client connects from a reserved port.
	 * CAP_NET_BIND_SERVICE will not be set for unprivileged requesters,
	 * but it is always enabled for rpciod, which handles the connect
	 */
	if (args->flags & RPC_CLNT_CREATE_NONPRIVPORT)

	return rpc_create_xprt(args, xprt);
}
EXPORT_SYMBOL_GPL(rpc_create);
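/*
 * Example (illustrative sketch only, not part of the original file): a
 * typical upper-layer caller fills in a struct rpc_create_args and calls
 * rpc_create().  The field names below are the ones referenced elsewhere
 * in this file; the server address, buffer and program definition are
 * hypothetical placeholders supplied by the caller.
 *
 *	struct rpc_create_args args = {
 *		.protocol	= XPRT_TRANSPORT_TCP,
 *		.address	= (struct sockaddr *)&srvaddr,
 *		.addrsize	= sizeof(srvaddr),
 *		.servername	= "server.example.com",
 *		.program	= &my_rpc_program,
 *		.version	= 3,
 *		.authflavor	= RPC_AUTH_UNIX,
 *		.flags		= RPC_CLNT_CREATE_NOPING,
 *	};
 *	struct rpc_clnt *clnt = rpc_create(&args);
 *
 *	if (IS_ERR(clnt))
 *		return PTR_ERR(clnt);
 *	...
 *	rpc_shutdown_client(clnt);
 */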
/*
 * This function clones the RPC client structure. It allows us to share the
 * same transport while varying parameters such as the authentication
 * flavour.
 */
static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args,
					   struct rpc_clnt *clnt)
{
	struct rpc_xprt_switch *xps;
	struct rpc_xprt *xprt;
	struct rpc_clnt *new;

	xprt = xprt_get(rcu_dereference(clnt->cl_xprt));
	xps = xprt_switch_get(rcu_dereference(clnt->cl_xpi.xpi_xpswitch));
	if (xprt == NULL || xps == NULL) {
		xprt_switch_put(xps);
	}
	args->servername = xprt->servername;
	args->nodename = clnt->cl_nodename;

	new = rpc_new_client(args, xps, xprt, clnt);

	/* Turn off autobind on clones */
	new->cl_autobind = 0;
	new->cl_softrtry = clnt->cl_softrtry;
	new->cl_softerr = clnt->cl_softerr;
	new->cl_noretranstimeo = clnt->cl_noretranstimeo;
	new->cl_discrtry = clnt->cl_discrtry;
	new->cl_chatty = clnt->cl_chatty;
	new->cl_principal = clnt->cl_principal;
	new->cl_cred = get_cred(clnt->cl_cred);

	dprintk("RPC: %s: returned error %d\n", __func__, err);
}

/**
 * rpc_clone_client - Clone an RPC client structure
 *
 * @clnt: RPC client whose parameters are copied
 *
 * Returns a fresh RPC client or an ERR_PTR.
 */
struct rpc_clnt *rpc_clone_client(struct rpc_clnt *clnt)
{
	struct rpc_create_args args = {
		.program	= clnt->cl_program,
		.prognumber	= clnt->cl_prog,
		.version	= clnt->cl_vers,
		.authflavor	= clnt->cl_auth->au_flavor,
		.cred		= clnt->cl_cred,
	};
	return __rpc_clone_client(&args, clnt);
}
EXPORT_SYMBOL_GPL(rpc_clone_client);

/**
 * rpc_clone_client_set_auth - Clone an RPC client structure and set its auth
 *
 * @clnt: RPC client whose parameters are copied
 * @flavor: security flavor for new client
 *
 * Returns a fresh RPC client or an ERR_PTR.
 */
struct rpc_clnt *
rpc_clone_client_set_auth(struct rpc_clnt *clnt, rpc_authflavor_t flavor)
{
	struct rpc_create_args args = {
		.program	= clnt->cl_program,
		.prognumber	= clnt->cl_prog,
		.version	= clnt->cl_vers,
		.authflavor	= flavor,
		.cred		= clnt->cl_cred,
	};
	return __rpc_clone_client(&args, clnt);
}
EXPORT_SYMBOL_GPL(rpc_clone_client_set_auth);

/**
 * rpc_switch_client_transport: switch the RPC transport on the fly
 * @clnt: pointer to a struct rpc_clnt
 * @args: pointer to the new transport arguments
 * @timeout: pointer to the new timeout parameters
 *
 * This function allows the caller to switch the RPC transport for the
 * rpc_clnt structure 'clnt' to allow it to connect to a mirrored NFS
 * server, for instance.  It assumes that the caller has ensured that
 * there are no active RPC tasks by using some form of locking.
 *
 * Returns zero if "clnt" is now using the new xprt.  Otherwise a
 * negative errno is returned, and "clnt" continues to use the old
 * xprt.
 */
int rpc_switch_client_transport(struct rpc_clnt *clnt,
		struct xprt_create *args,
		const struct rpc_timeout *timeout)
{
	const struct rpc_timeout *old_timeo;
	rpc_authflavor_t pseudoflavor;
	struct rpc_xprt_switch *xps, *oldxps;
	struct rpc_xprt *xprt, *old;
	struct rpc_clnt *parent;

	xprt = xprt_create_transport(args);
	dprintk("RPC: failed to create new xprt for clnt %p\n",
			clnt);
	return PTR_ERR(xprt);

	xps = xprt_switch_alloc(xprt, GFP_KERNEL);

	pseudoflavor = clnt->cl_auth->au_flavor;

	old_timeo = clnt->cl_timeout;
	old = rpc_clnt_set_transport(clnt, xprt, timeout);
	oldxps = xprt_iter_xchg_switch(&clnt->cl_xpi, xps);

	rpc_unregister_client(clnt);
	__rpc_clnt_remove_pipedir(clnt);
	rpc_clnt_debugfs_unregister(clnt);

	/*
	 * A new transport was created.  "clnt" therefore
	 * becomes the root of a new cl_parent tree.  clnt's
	 * children, if it has any, still point to the old xprt.
	 */
	parent = clnt->cl_parent;
	clnt->cl_parent = clnt;

	/*
	 * The old rpc_auth cache cannot be re-used.  GSS
	 * contexts in particular are between a single
	 */
	err = rpc_client_register(clnt, pseudoflavor, NULL);

	rpc_release_client(parent);
	xprt_switch_put(oldxps);
	dprintk("RPC: replaced xprt for clnt %p\n", clnt);

	xps = xprt_iter_xchg_switch(&clnt->cl_xpi, oldxps);
	rpc_clnt_set_transport(clnt, old, old_timeo);
	clnt->cl_parent = parent;
	rpc_client_register(clnt, pseudoflavor, NULL);
	xprt_switch_put(xps);
	dprintk("RPC: failed to switch xprt for clnt %p\n", clnt);
}
EXPORT_SYMBOL_GPL(rpc_switch_client_transport);
int rpc_clnt_xprt_iter_init(struct rpc_clnt *clnt, struct rpc_xprt_iter *xpi)
{
	struct rpc_xprt_switch *xps;

	xps = xprt_switch_get(rcu_dereference(clnt->cl_xpi.xpi_xpswitch));
	xprt_iter_init_listall(xpi, xps);
	xprt_switch_put(xps);
}

/**
 * rpc_clnt_iterate_for_each_xprt - Apply a function to all transports
 * @clnt: pointer to client
 * @fn: function to apply
 * @data: void pointer to function data
 *
 * Iterates through the list of RPC transports currently attached to the
 * client and applies the function fn(clnt, xprt, data).
 *
 * On error, the iteration stops, and the function returns the error value.
 */
int rpc_clnt_iterate_for_each_xprt(struct rpc_clnt *clnt,
		int (*fn)(struct rpc_clnt *, struct rpc_xprt *, void *),
		void *data)
{
	struct rpc_xprt_iter xpi;

	ret = rpc_clnt_xprt_iter_init(clnt, &xpi);
	for (;;) {
		struct rpc_xprt *xprt = xprt_iter_get_next(&xpi);

		ret = fn(clnt, xprt, data);
	}
	xprt_iter_destroy(&xpi);
}
EXPORT_SYMBOL_GPL(rpc_clnt_iterate_for_each_xprt);
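/*
 * Example (illustrative sketch only): a caller supplies a function with the
 * signature int fn(struct rpc_clnt *, struct rpc_xprt *, void *) plus a data
 * cookie; a non-zero return stops the iteration.  "count_xprt" is a
 * hypothetical name.
 *
 *	static int count_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt,
 *			      void *data)
 *	{
 *		(*(unsigned int *)data)++;
 *		return 0;
 *	}
 *
 *	unsigned int count = 0;
 *	rpc_clnt_iterate_for_each_xprt(clnt, count_xprt, &count);
 */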
/*
 * Kill all tasks for the given client.
 * XXX: kill their descendants as well?
 */
void rpc_killall_tasks(struct rpc_clnt *clnt)
{
	struct rpc_task *rovr;

	if (list_empty(&clnt->cl_tasks))
	dprintk("RPC: killing all tasks for client %p\n", clnt);
	/*
	 * Spin lock all_tasks to prevent changes...
	 */
	spin_lock(&clnt->cl_lock);
	list_for_each_entry(rovr, &clnt->cl_tasks, tk_task)
		rpc_signal_task(rovr);
	spin_unlock(&clnt->cl_lock);
}
EXPORT_SYMBOL_GPL(rpc_killall_tasks);

/*
 * Properly shut down an RPC client, terminating all outstanding
 * requests.
 */
void rpc_shutdown_client(struct rpc_clnt *clnt)
{
	dprintk_rcu("RPC: shutting down %s client for %s\n",
			clnt->cl_program->name,
			rcu_dereference(clnt->cl_xprt)->servername);

	while (!list_empty(&clnt->cl_tasks)) {
		rpc_killall_tasks(clnt);
		wait_event_timeout(destroy_wait,
			list_empty(&clnt->cl_tasks), 1*HZ);
	}

	rpc_release_client(clnt);
}
EXPORT_SYMBOL_GPL(rpc_shutdown_client);
static struct rpc_clnt *
rpc_free_client(struct rpc_clnt *clnt)
{
	struct rpc_clnt *parent = NULL;

	dprintk_rcu("RPC: destroying %s client for %s\n",
			clnt->cl_program->name,
			rcu_dereference(clnt->cl_xprt)->servername);
	if (clnt->cl_parent != clnt)
		parent = clnt->cl_parent;
	rpc_clnt_debugfs_unregister(clnt);
	rpc_clnt_remove_pipedir(clnt);
	rpc_unregister_client(clnt);
	rpc_free_iostats(clnt->cl_metrics);
	clnt->cl_metrics = NULL;
	xprt_put(rcu_dereference_raw(clnt->cl_xprt));
	xprt_iter_destroy(&clnt->cl_xpi);
	put_cred(clnt->cl_cred);
}

static struct rpc_clnt *
rpc_free_auth(struct rpc_clnt *clnt)
{
	if (clnt->cl_auth == NULL)
		return rpc_free_client(clnt);

	/*
	 * Note: RPCSEC_GSS may need to send NULL RPC calls in order to
	 * release remaining GSS contexts. This mechanism ensures
	 * that it can do so safely.
	 */
	atomic_inc(&clnt->cl_count);
	rpcauth_release(clnt->cl_auth);
	clnt->cl_auth = NULL;
	if (atomic_dec_and_test(&clnt->cl_count))
		return rpc_free_client(clnt);
}

/*
 * Release reference to the RPC client
 */
void
rpc_release_client(struct rpc_clnt *clnt)
{
	dprintk("RPC: rpc_release_client(%p)\n", clnt);

	do {
		if (list_empty(&clnt->cl_tasks))
			wake_up(&destroy_wait);
		if (!atomic_dec_and_test(&clnt->cl_count))
		clnt = rpc_free_auth(clnt);
	} while (clnt != NULL);
}
EXPORT_SYMBOL_GPL(rpc_release_client);
/**
 * rpc_bind_new_program - bind a new RPC program to an existing client
 * @old: old rpc_client
 * @program: rpc program to set
 * @vers: rpc program version
 *
 * Clones the rpc client and sets up a new RPC program. This is mainly
 * of use for enabling different RPC programs to share the same transport.
 * The Sun NFSv2/v3 ACL protocol can do this.
 */
struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old,
				      const struct rpc_program *program,
				      u32 vers)
{
	struct rpc_create_args args = {
		.prognumber	= program->number,
		.authflavor	= old->cl_auth->au_flavor,
		.cred		= old->cl_cred,
	};
	struct rpc_clnt *clnt;

	clnt = __rpc_clone_client(&args, old);
	err = rpc_ping(clnt);
	rpc_shutdown_client(clnt);
}
EXPORT_SYMBOL_GPL(rpc_bind_new_program);

void rpc_task_release_transport(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	task->tk_xprt = NULL;
}
EXPORT_SYMBOL_GPL(rpc_task_release_transport);
void rpc_task_release_client(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;

	/* Remove from client task list */
	spin_lock(&clnt->cl_lock);
	list_del(&task->tk_task);
	spin_unlock(&clnt->cl_lock);
	task->tk_client = NULL;

	rpc_release_client(clnt);
	rpc_task_release_transport(task);
}

void rpc_task_set_transport(struct rpc_task *task, struct rpc_clnt *clnt)
{
	task->tk_xprt = xprt_iter_get_next(&clnt->cl_xpi);
}

void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt)
{
	rpc_task_set_transport(task, clnt);
	task->tk_client = clnt;
	atomic_inc(&clnt->cl_count);
	if (clnt->cl_softrtry)
		task->tk_flags |= RPC_TASK_SOFT;
	if (clnt->cl_softerr)
		task->tk_flags |= RPC_TASK_TIMEOUT;
	if (clnt->cl_noretranstimeo)
		task->tk_flags |= RPC_TASK_NO_RETRANS_TIMEOUT;
	if (atomic_read(&clnt->cl_swapper))
		task->tk_flags |= RPC_TASK_SWAPPER;
	/* Add to the client's list of all tasks */
	spin_lock(&clnt->cl_lock);
	list_add_tail(&task->tk_task, &clnt->cl_tasks);
	spin_unlock(&clnt->cl_lock);
}
static void
rpc_task_set_rpc_message(struct rpc_task *task, const struct rpc_message *msg)
{
	task->tk_msg.rpc_proc = msg->rpc_proc;
	task->tk_msg.rpc_argp = msg->rpc_argp;
	task->tk_msg.rpc_resp = msg->rpc_resp;
	if (msg->rpc_cred != NULL)
		task->tk_msg.rpc_cred = get_cred(msg->rpc_cred);
}

/*
 * Default callback for async RPC calls
 */
static void
rpc_default_callback(struct rpc_task *task, void *data)
{
}

static const struct rpc_call_ops rpc_default_ops = {
	.rpc_call_done = rpc_default_callback,
};

/**
 * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it
 * @task_setup_data: pointer to task initialisation data
 */
struct rpc_task *rpc_run_task(const struct rpc_task_setup *task_setup_data)
{
	struct rpc_task *task;

	task = rpc_new_task(task_setup_data);
	rpc_task_set_client(task, task_setup_data->rpc_client);
	rpc_task_set_rpc_message(task, task_setup_data->rpc_message);

	if (task->tk_action == NULL)
		rpc_call_start(task);

	atomic_inc(&task->tk_count);
}
EXPORT_SYMBOL_GPL(rpc_run_task);
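/*
 * Example (illustrative sketch only): callers that need the rpc_task itself,
 * for instance to inspect tk_status before releasing it, set up a
 * struct rpc_task_setup and call rpc_run_task() directly, much as
 * rpc_call_sync() below does.  The task is released with rpc_put_task()
 * from the RPC scheduler once the caller is done with it.
 *
 *	struct rpc_task_setup setup = {
 *		.rpc_client	= clnt,
 *		.rpc_message	= &msg,
 *		.callback_ops	= &rpc_default_ops,
 *		.flags		= RPC_TASK_SOFT,
 *	};
 *	struct rpc_task *task = rpc_run_task(&setup);
 *
 *	if (IS_ERR(task))
 *		return PTR_ERR(task);
 *	status = task->tk_status;
 *	rpc_put_task(task);
 */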
/**
 * rpc_call_sync - Perform a synchronous RPC call
 * @clnt: pointer to RPC client
 * @msg: RPC call parameters
 * @flags: RPC call flags
 */
int rpc_call_sync(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags)
{
	struct rpc_task *task;
	struct rpc_task_setup task_setup_data = {
		.callback_ops = &rpc_default_ops,
	};

	WARN_ON_ONCE(flags & RPC_TASK_ASYNC);
	if (flags & RPC_TASK_ASYNC) {
		rpc_release_calldata(task_setup_data.callback_ops,
			task_setup_data.callback_data);
	}

	task = rpc_run_task(&task_setup_data);
		return PTR_ERR(task);
	status = task->tk_status;
}
EXPORT_SYMBOL_GPL(rpc_call_sync);
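/*
 * Example (illustrative sketch only; the procedure table and the argument
 * and result types are hypothetical): a synchronous call through
 * rpc_call_sync() fills in a struct rpc_message and blocks until the reply
 * has been decoded.
 *
 *	struct rpc_message msg = {
 *		.rpc_proc	= &my_program_procedures[MY_PROC_FOO],
 *		.rpc_argp	= &args,
 *		.rpc_resp	= &res,
 *	};
 *	int status = rpc_call_sync(clnt, &msg, RPC_TASK_SOFT);
 */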
/**
 * rpc_call_async - Perform an asynchronous RPC call
 * @clnt: pointer to RPC client
 * @msg: RPC call parameters
 * @flags: RPC call flags
 * @tk_ops: RPC call ops
 * @data: user call data
 */
int
rpc_call_async(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags,
	       const struct rpc_call_ops *tk_ops, void *data)
{
	struct rpc_task *task;
	struct rpc_task_setup task_setup_data = {
		.callback_ops = tk_ops,
		.callback_data = data,
		.flags = flags|RPC_TASK_ASYNC,
	};

	task = rpc_run_task(&task_setup_data);
		return PTR_ERR(task);
}
EXPORT_SYMBOL_GPL(rpc_call_async);
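/*
 * Example (illustrative sketch only): an asynchronous caller provides a
 * struct rpc_call_ops whose rpc_call_done member runs when the call
 * completes; "my_call_done" and "my_calldata" are hypothetical names.
 *
 *	static void my_call_done(struct rpc_task *task, void *calldata)
 *	{
 *		struct my_calldata *d = calldata;
 *		d->status = task->tk_status;
 *	}
 *
 *	static const struct rpc_call_ops my_call_ops = {
 *		.rpc_call_done	= my_call_done,
 *	};
 *
 *	err = rpc_call_async(clnt, &msg, RPC_TASK_SOFT, &my_call_ops, d);
 */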
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
static void call_bc_encode(struct rpc_task *task);

/*
 * rpc_run_bc_task - Allocate a new RPC task for backchannel use, then run
 * rpc_execute against it
 */
struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req)
{
	struct rpc_task *task;
	struct rpc_task_setup task_setup_data = {
		.callback_ops = &rpc_default_ops,
		.flags = RPC_TASK_SOFTCONN |
			RPC_TASK_NO_RETRANS_TIMEOUT,
	};

	dprintk("RPC: rpc_run_bc_task req= %p\n", req);
	/*
	 * Create an rpc_task to send the data
	 */
	task = rpc_new_task(&task_setup_data);
	xprt_init_bc_request(req, task);

	task->tk_action = call_bc_encode;
	atomic_inc(&task->tk_count);
	WARN_ON_ONCE(atomic_read(&task->tk_count) != 2);
	dprintk("RPC: rpc_run_bc_task: task= %p\n", task);
}
#endif /* CONFIG_SUNRPC_BACKCHANNEL */

/**
 * rpc_prepare_reply_pages - Prepare to receive a reply data payload into pages
 * @req: RPC request to prepare
 * @pages: vector of struct page pointers
 * @base: offset in first page where receive should start, in bytes
 * @len: expected size of the upper layer data payload, in bytes
 * @hdrsize: expected size of upper layer reply header, in XDR words
 */
void rpc_prepare_reply_pages(struct rpc_rqst *req, struct page **pages,
			     unsigned int base, unsigned int len,
			     unsigned int hdrsize)
{
	/* Subtract one to force an extra word of buffer space for the
	 * payload's XDR pad to fall into the rcv_buf's tail iovec.
	 */
	hdrsize += RPC_REPHDRSIZE + req->rq_cred->cr_auth->au_ralign - 1;

	xdr_inline_pages(&req->rq_rcv_buf, hdrsize << 2, pages, base, len);
	trace_rpc_reply_pages(req);
}
EXPORT_SYMBOL_GPL(rpc_prepare_reply_pages);

void
rpc_call_start(struct rpc_task *task)
{
	task->tk_action = call_start;
}
EXPORT_SYMBOL_GPL(rpc_call_start);
/**
 * rpc_peeraddr - extract remote peer address from clnt's xprt
 * @clnt: RPC client structure
 * @buf: target buffer
 * @bufsize: length of target buffer
 *
 * Returns the number of bytes that are actually in the stored address.
 */
size_t rpc_peeraddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t bufsize)
{
	struct rpc_xprt *xprt;

	xprt = rcu_dereference(clnt->cl_xprt);

	bytes = xprt->addrlen;
	if (bytes > bufsize)
	memcpy(buf, &xprt->addr, bytes);
}
EXPORT_SYMBOL_GPL(rpc_peeraddr);

/**
 * rpc_peeraddr2str - return remote peer address in printable format
 * @clnt: RPC client structure
 * @format: address format
 *
 * NB: the lifetime of the memory referenced by the returned pointer is
 * the same as the rpc_xprt itself.  As long as the caller uses this
 * pointer, it must hold the RCU read lock.
 */
const char *rpc_peeraddr2str(struct rpc_clnt *clnt,
			     enum rpc_display_format_t format)
{
	struct rpc_xprt *xprt;

	xprt = rcu_dereference(clnt->cl_xprt);

	if (xprt->address_strings[format] != NULL)
		return xprt->address_strings[format];
	return "unprintable";
}
EXPORT_SYMBOL_GPL(rpc_peeraddr2str);
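/*
 * Example (illustrative sketch only): because the returned string lives in
 * the rpc_xprt, callers hold the RCU read lock around its use, e.g.
 *
 *	rcu_read_lock();
 *	pr_info("peer is %s\n", rpc_peeraddr2str(clnt, RPC_DISPLAY_ADDR));
 *	rcu_read_unlock();
 */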
static const struct sockaddr_in rpc_inaddr_loopback = {
	.sin_family	= AF_INET,
	.sin_addr.s_addr = htonl(INADDR_ANY),
};

static const struct sockaddr_in6 rpc_in6addr_loopback = {
	.sin6_family	= AF_INET6,
	.sin6_addr	= IN6ADDR_ANY_INIT,
};

/*
 * Try a getsockname() on a connected datagram socket.  Using a
 * connected datagram socket prevents leaving a socket in TIME_WAIT.
 * This conserves the ephemeral port number space.
 *
 * Returns zero and fills in "buf" if successful; otherwise, a
 * negative errno is returned.
 */
static int rpc_sockname(struct net *net, struct sockaddr *sap, size_t salen,
			struct sockaddr *buf)
{
	struct socket *sock;

	err = __sock_create(net, sap->sa_family,
			SOCK_DGRAM, IPPROTO_UDP, &sock, 1);
	dprintk("RPC: can't create UDP socket (%d)\n", err);

	switch (sap->sa_family) {
		err = kernel_bind(sock,
				(struct sockaddr *)&rpc_inaddr_loopback,
				sizeof(rpc_inaddr_loopback));
		err = kernel_bind(sock,
				(struct sockaddr *)&rpc_in6addr_loopback,
				sizeof(rpc_in6addr_loopback));
		err = -EAFNOSUPPORT;
	}
	dprintk("RPC: can't bind UDP socket (%d)\n", err);

	err = kernel_connect(sock, sap, salen, 0);
	dprintk("RPC: can't connect UDP socket (%d)\n", err);

	err = kernel_getsockname(sock, buf);
	dprintk("RPC: getsockname failed (%d)\n", err);

	if (buf->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)buf;
		sin6->sin6_scope_id = 0;
	}
	dprintk("RPC: %s succeeded\n", __func__);
}
/*
 * Scraping a connected socket failed, so we don't have a useable
 * local address.  Fallback: generate an address that will prevent
 * the server from calling us back.
 *
 * Returns zero and fills in "buf" if successful; otherwise, a
 * negative errno is returned.
 */
static int rpc_anyaddr(int family, struct sockaddr *buf, size_t buflen)
{
	if (buflen < sizeof(rpc_inaddr_loopback))
	memcpy(buf, &rpc_inaddr_loopback,
			sizeof(rpc_inaddr_loopback));
	if (buflen < sizeof(rpc_in6addr_loopback))
	memcpy(buf, &rpc_in6addr_loopback,
			sizeof(rpc_in6addr_loopback));
	dprintk("RPC: %s: address family not supported\n",
			__func__);
	return -EAFNOSUPPORT;
	dprintk("RPC: %s: succeeded\n", __func__);
}

/**
 * rpc_localaddr - discover local endpoint address for an RPC client
 * @clnt: RPC client structure
 * @buf: target buffer
 * @buflen: size of target buffer, in bytes
 *
 * Returns zero and fills in "buf" and "buflen" if successful;
 * otherwise, a negative errno is returned.
 *
 * This works even if the underlying transport is not currently connected,
 * or if the upper layer never previously provided a source address.
 *
 * The result of this function call is transient: multiple calls in
 * succession may give different results, depending on how local
 * networking configuration changes over time.
 */
int rpc_localaddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t buflen)
{
	struct sockaddr_storage address;
	struct sockaddr *sap = (struct sockaddr *)&address;
	struct rpc_xprt *xprt;

	xprt = rcu_dereference(clnt->cl_xprt);
	salen = xprt->addrlen;
	memcpy(sap, &xprt->addr, salen);
	net = get_net(xprt->xprt_net);

	rpc_set_port(sap, 0);
	err = rpc_sockname(net, sap, salen, buf);
	/* Couldn't discover local address, return ANYADDR */
	return rpc_anyaddr(sap->sa_family, buf, buflen);
}
EXPORT_SYMBOL_GPL(rpc_localaddr);
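/*
 * Example (illustrative sketch only): callers typically pass a
 * struct sockaddr_storage so that any address family fits:
 *
 *	struct sockaddr_storage saddr;
 *	int err = rpc_localaddr(clnt, (struct sockaddr *)&saddr,
 *				sizeof(saddr));
 */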
void
rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize)
{
	struct rpc_xprt *xprt;

	xprt = rcu_dereference(clnt->cl_xprt);
	if (xprt->ops->set_buffer_size)
		xprt->ops->set_buffer_size(xprt, sndsize, rcvsize);
}
EXPORT_SYMBOL_GPL(rpc_setbufsize);

/**
 * rpc_net_ns - Get the network namespace for this RPC client
 * @clnt: RPC client to query
 */
struct net *rpc_net_ns(struct rpc_clnt *clnt)
{
	ret = rcu_dereference(clnt->cl_xprt)->xprt_net;
}
EXPORT_SYMBOL_GPL(rpc_net_ns);

/**
 * rpc_max_payload - Get maximum payload size for a transport, in bytes
 * @clnt: RPC client to query
 *
 * For stream transports, this is one RPC record fragment (see RFC
 * 1831), as we don't support multi-record requests yet.  For datagram
 * transports, this is the size of an IP packet minus the IP, UDP, and
 * RPC header sizes.
 */
size_t rpc_max_payload(struct rpc_clnt *clnt)
{
	ret = rcu_dereference(clnt->cl_xprt)->max_payload;
}
EXPORT_SYMBOL_GPL(rpc_max_payload);

/**
 * rpc_max_bc_payload - Get maximum backchannel payload size, in bytes
 * @clnt: RPC client to query
 */
size_t rpc_max_bc_payload(struct rpc_clnt *clnt)
{
	struct rpc_xprt *xprt;

	xprt = rcu_dereference(clnt->cl_xprt);
	ret = xprt->ops->bc_maxpayload(xprt);
}
EXPORT_SYMBOL_GPL(rpc_max_bc_payload);

/**
 * rpc_force_rebind - force transport to check that remote port is unchanged
 * @clnt: client to rebind
 */
void rpc_force_rebind(struct rpc_clnt *clnt)
{
	if (clnt->cl_autobind) {
		xprt_clear_bound(rcu_dereference(clnt->cl_xprt));
	}
}
EXPORT_SYMBOL_GPL(rpc_force_rebind);
static int
__rpc_restart_call(struct rpc_task *task, void (*action)(struct rpc_task *))
{
	task->tk_status = 0;
	task->tk_rpc_status = 0;
	task->tk_action = action;
}

/*
 * Restart an (async) RPC call. Usually called from within the
 * exit handler.
 */
int
rpc_restart_call(struct rpc_task *task)
{
	return __rpc_restart_call(task, call_start);
}
EXPORT_SYMBOL_GPL(rpc_restart_call);

/*
 * Restart an (async) RPC call from the call_prepare state.
 * Usually called from within the exit handler.
 */
int
rpc_restart_call_prepare(struct rpc_task *task)
{
	if (task->tk_ops->rpc_call_prepare != NULL)
		return __rpc_restart_call(task, rpc_prepare_task);
	return rpc_restart_call(task);
}
EXPORT_SYMBOL_GPL(rpc_restart_call_prepare);

static const char
*rpc_proc_name(const struct rpc_task *task)
{
	const struct rpc_procinfo *proc = task->tk_msg.rpc_proc;

	return proc->p_name;
}

static void
__rpc_call_rpcerror(struct rpc_task *task, int tk_status, int rpc_status)
{
	task->tk_rpc_status = rpc_status;
	rpc_exit(task, tk_status);
}

static void
rpc_call_rpcerror(struct rpc_task *task, int status)
{
	__rpc_call_rpcerror(task, status, status);
}
/*
 * Other FSM states can be visited zero or more times, but
 * this state is visited exactly once for each RPC.
 */
static void
call_start(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;
	int idx = task->tk_msg.rpc_proc->p_statidx;

	trace_rpc_request(task);
	dprintk("RPC: %5u call_start %s%d proc %s (%s)\n", task->tk_pid,
			clnt->cl_program->name, clnt->cl_vers,
			rpc_proc_name(task),
			(RPC_IS_ASYNC(task) ? "async" : "sync"));

	/* Increment call count (version might not be valid for ping) */
	if (clnt->cl_program->version[clnt->cl_vers])
		clnt->cl_program->version[clnt->cl_vers]->counts[idx]++;
	clnt->cl_stats->rpccnt++;
	task->tk_action = call_reserve;
	rpc_task_set_transport(task, clnt);
}

/*
 * 1.	Reserve an RPC call slot
 */
static void
call_reserve(struct rpc_task *task)
{
	dprint_status(task);

	task->tk_status = 0;
	task->tk_action = call_reserveresult;
}

static void call_retry_reserve(struct rpc_task *task);

/*
 * 1b.	Grok the result of xprt_reserve()
 */
static void
call_reserveresult(struct rpc_task *task)
{
	int status = task->tk_status;

	dprint_status(task);

	/*
	 * After a call to xprt_reserve(), we must have either
	 * a request slot or else an error status.
	 */
	task->tk_status = 0;
	if (task->tk_rqstp) {
		task->tk_action = call_refresh;
	}

	printk(KERN_ERR "%s: status=%d, but no request slot, exiting\n",
			__func__, status);
	rpc_call_rpcerror(task, -EIO);

	/*
	 * Even though there was an error, we may have acquired
	 * a request slot somehow.  Make sure not to leak it.
	 */
	if (task->tk_rqstp) {
		printk(KERN_ERR "%s: status=%d, request allocated anyway\n",
				__func__, status);
	}

	switch (status) {
		rpc_delay(task, HZ >> 2);
	case -EAGAIN:	/* woken up; retry */
		task->tk_action = call_retry_reserve;
	case -EIO:	/* probably a shutdown */
		printk(KERN_ERR "%s: unrecognized error %d, exiting\n",
				__func__, status);
	}
	rpc_call_rpcerror(task, status);
}

/*
 * 1c.	Retry reserving an RPC call slot
 */
static void
call_retry_reserve(struct rpc_task *task)
{
	dprint_status(task);

	task->tk_status = 0;
	task->tk_action = call_reserveresult;
	xprt_retry_reserve(task);
}

/*
 * 2.	Bind and/or refresh the credentials
 */
static void
call_refresh(struct rpc_task *task)
{
	dprint_status(task);

	task->tk_action = call_refreshresult;
	task->tk_status = 0;
	task->tk_client->cl_stats->rpcauthrefresh++;
	rpcauth_refreshcred(task);
}

/*
 * 2a.	Process the results of a credential refresh
 */
static void
call_refreshresult(struct rpc_task *task)
{
	int status = task->tk_status;

	dprint_status(task);

	task->tk_status = 0;
	task->tk_action = call_refresh;
	if (rpcauth_uptodatecred(task)) {
		task->tk_action = call_allocate;
	}
	/* Use rate-limiting and a max number of retries if refresh
	 * had status 0 but failed to update the cred.
	 */
	rpc_delay(task, 3*HZ);
	if (!task->tk_cred_retry)
	task->tk_cred_retry--;
	dprintk("RPC: %5u %s: retry refresh creds\n",
			task->tk_pid, __func__);
	dprintk("RPC: %5u %s: refresh creds failed with error %d\n",
			task->tk_pid, __func__, status);
	rpc_call_rpcerror(task, status);
}
/*
 * 2b.	Allocate the buffer. For details, see sched.c:rpc_malloc.
 *	(Note: buffer memory is freed in xprt_release).
 */
static void
call_allocate(struct rpc_task *task)
{
	const struct rpc_auth *auth = task->tk_rqstp->rq_cred->cr_auth;
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	const struct rpc_procinfo *proc = task->tk_msg.rpc_proc;

	dprint_status(task);

	task->tk_status = 0;
	task->tk_action = call_encode;

	if (proc->p_proc != 0) {
		BUG_ON(proc->p_arglen == 0);
		if (proc->p_decode != NULL)
			BUG_ON(proc->p_replen == 0);
	}

	/*
	 * Calculate the size (in quads) of the RPC call
	 * and reply headers, and convert both values
	 */
	req->rq_callsize = RPC_CALLHDRSIZE + (auth->au_cslack << 1) +
	req->rq_callsize <<= 2;
	/*
	 * Note: the reply buffer must at minimum allocate enough space
	 * for the 'struct accepted_reply' from RFC5531.
	 */
	req->rq_rcvsize = RPC_REPHDRSIZE + auth->au_rslack + \
			  max_t(size_t, proc->p_replen, 2);
	req->rq_rcvsize <<= 2;

	status = xprt->ops->buf_alloc(task);
	xprt_inject_disconnect(xprt);
	if (status != -ENOMEM) {
		rpc_call_rpcerror(task, status);
	}

	dprintk("RPC: %5u rpc_buffer allocation failed\n", task->tk_pid);

	if (RPC_IS_ASYNC(task) || !fatal_signal_pending(current)) {
		task->tk_action = call_allocate;
		rpc_delay(task, HZ >> 4);
	}

	rpc_exit(task, -ERESTARTSYS);
}

static bool
rpc_task_need_encode(struct rpc_task *task)
{
	return test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate) == 0 &&
		(!(task->tk_flags & RPC_TASK_SENT) ||
		 !(task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT) ||
		 xprt_request_need_retransmit(task));
}

static void
rpc_xdr_encode(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct xdr_stream xdr;

	xdr_buf_init(&req->rq_snd_buf,
	xdr_buf_init(&req->rq_rcv_buf,

	req->rq_snd_buf.head[0].iov_len = 0;
	xdr_init_encode(&xdr, &req->rq_snd_buf,
			req->rq_snd_buf.head[0].iov_base, req);
	if (rpc_encode_header(task, &xdr))

	task->tk_status = rpcauth_wrap_req(task, &xdr);
}
/*
 * 3.	Encode arguments of an RPC call
 */
static void
call_encode(struct rpc_task *task)
{
	if (!rpc_task_need_encode(task))
	dprint_status(task);
	/* Encode here so that rpcsec_gss can use correct sequence number. */
	rpc_xdr_encode(task);
	/* Did the encode result in an error condition? */
	if (task->tk_status != 0) {
		/* Was the error nonfatal? */
		switch (task->tk_status) {
			rpc_delay(task, HZ >> 4);
			if (!task->tk_cred_retry) {
				rpc_exit(task, task->tk_status);
			}
			task->tk_action = call_refresh;
			task->tk_cred_retry--;
			dprintk("RPC: %5u %s: retry refresh creds\n",
					task->tk_pid, __func__);
			rpc_call_rpcerror(task, task->tk_status);
		}
	}

	xprt_request_prepare(task->tk_rqstp);

	/* Add task to reply queue before transmission to avoid races */
	if (rpc_reply_expected(task))
		xprt_request_enqueue_receive(task);
	xprt_request_enqueue_transmit(task);

	task->tk_action = call_transmit;
	/* Check that the connection is OK */
	if (!xprt_bound(task->tk_xprt))
		task->tk_action = call_bind;
	else if (!xprt_connected(task->tk_xprt))
		task->tk_action = call_connect;
}

/*
 * Helpers to check if the task was already transmitted, and
 * to take action when that is the case.
 */
static bool
rpc_task_transmitted(struct rpc_task *task)
{
	return !test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
}

static void
rpc_task_handle_transmitted(struct rpc_task *task)
{
	xprt_end_transmit(task);
	task->tk_action = call_transmit_status;
}

/*
 * 4.	Get the server port number if not yet set
 */
static void
call_bind(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;

	if (rpc_task_transmitted(task)) {
		rpc_task_handle_transmitted(task);
	}

	if (xprt_bound(xprt)) {
		task->tk_action = call_connect;
	}

	dprint_status(task);

	task->tk_action = call_bind_status;
	if (!xprt_prepare_transmit(task))
	xprt->ops->rpcbind(task);
}

/*
 * 4a.	Sort out bind result
 */
static void
call_bind_status(struct rpc_task *task)
{
	if (rpc_task_transmitted(task)) {
		rpc_task_handle_transmitted(task);
	}

	if (task->tk_status >= 0) {
		dprint_status(task);
		task->tk_status = 0;
		task->tk_action = call_connect;
	}

	trace_rpc_bind_status(task);
	switch (task->tk_status) {
		dprintk("RPC: %5u rpcbind out of memory\n", task->tk_pid);
		rpc_delay(task, HZ >> 2);
		dprintk("RPC: %5u remote rpcbind: RPC program/version "
				"unavailable\n", task->tk_pid);
		/* fail immediately if this is an RPC ping */
		if (task->tk_msg.rpc_proc->p_proc == 0) {
			status = -EOPNOTSUPP;
		}
		if (task->tk_rebind_retry == 0)
		task->tk_rebind_retry--;
		rpc_delay(task, 3*HZ);
		dprintk("RPC: %5u rpcbind request timed out\n",
				task->tk_pid);
		/* server doesn't support any rpcbind version we know of */
		dprintk("RPC: %5u unrecognized remote rpcbind service\n",
				task->tk_pid);
	case -EPROTONOSUPPORT:
		dprintk("RPC: %5u remote rpcbind version unavailable, retrying\n",
				task->tk_pid);
	case -ECONNREFUSED:		/* connection problems */
		dprintk("RPC: %5u remote rpcbind unreachable: %d\n",
				task->tk_pid, task->tk_status);
		if (!RPC_IS_SOFTCONN(task)) {
			rpc_delay(task, 5*HZ);
		}
		status = task->tk_status;
		dprintk("RPC: %5u unrecognized rpcbind error (%d)\n",
				task->tk_pid, -task->tk_status);
	}

	rpc_call_rpcerror(task, status);

	task->tk_status = 0;
	task->tk_action = call_bind;
	rpc_check_timeout(task);
}
/*
 * 4b.	Connect to the RPC server
 */
static void
call_connect(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;

	if (rpc_task_transmitted(task)) {
		rpc_task_handle_transmitted(task);
	}

	if (xprt_connected(xprt)) {
		task->tk_action = call_transmit;
	}

	dprintk("RPC: %5u call_connect xprt %p %s connected\n",
			task->tk_pid, xprt,
			(xprt_connected(xprt) ? "is" : "is not"));

	task->tk_action = call_connect_status;
	if (task->tk_status < 0)
	if (task->tk_flags & RPC_TASK_NOCONNECT) {
		rpc_call_rpcerror(task, -ENOTCONN);
	}
	if (!xprt_prepare_transmit(task))
}

/*
 * 4c.	Sort out connect result
 */
static void
call_connect_status(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;
	int status = task->tk_status;

	if (rpc_task_transmitted(task)) {
		rpc_task_handle_transmitted(task);
	}

	dprint_status(task);

	trace_rpc_connect_status(task);
	task->tk_status = 0;
	/* A positive refusal suggests a rebind is needed. */
	if (RPC_IS_SOFTCONN(task))
	if (clnt->cl_autobind) {
		rpc_force_rebind(clnt);
	}
	xprt_conditional_disconnect(task->tk_rqstp->rq_xprt,
				    task->tk_rqstp->rq_connect_cookie);
	if (RPC_IS_SOFTCONN(task))
	/* retry with existing socket, after a delay */
	rpc_delay(task, 3*HZ);
	clnt->cl_stats->netreconn++;
	task->tk_action = call_transmit;
	rpc_call_rpcerror(task, status);
	/* Check for timeouts before looping back to call_bind */
	task->tk_action = call_bind;
	rpc_check_timeout(task);
}

/*
 * 5.	Transmit the RPC request, and wait for reply
 */
static void
call_transmit(struct rpc_task *task)
{
	if (rpc_task_transmitted(task)) {
		rpc_task_handle_transmitted(task);
	}

	dprint_status(task);

	task->tk_action = call_transmit_status;
	if (!xprt_prepare_transmit(task))
	task->tk_status = 0;
	if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate)) {
		if (!xprt_connected(task->tk_xprt)) {
			task->tk_status = -ENOTCONN;
		}
		xprt_transmit(task);
	}
	xprt_end_transmit(task);
}

/*
 * 5a.	Handle cleanup after a transmission
 */
static void
call_transmit_status(struct rpc_task *task)
{
	task->tk_action = call_status;

	/*
	 * Common case: success.  Force the compiler to put this
	 * test first.
	 */
	if (rpc_task_transmitted(task)) {
		task->tk_status = 0;
		xprt_request_wait_receive(task);
	}

	switch (task->tk_status) {
		dprint_status(task);
		task->tk_status = 0;
		task->tk_action = call_encode;
		/*
		 * Special cases: if we've been waiting on the
		 * socket's write_space() callback, or if the
		 * socket just returned a connection error,
		 * then hold onto the transport lock.
		 */
		rpc_delay(task, HZ >> 2);
		task->tk_action = call_transmit;
		task->tk_status = 0;
		if (RPC_IS_SOFTCONN(task)) {
			if (!task->tk_msg.rpc_proc->p_proc)
				trace_xprt_ping(task->tk_xprt,
						task->tk_status);
			rpc_call_rpcerror(task, task->tk_status);
		}
		task->tk_action = call_bind;
		task->tk_status = 0;
	}
	rpc_check_timeout(task);
}
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
static void call_bc_transmit(struct rpc_task *task);
static void call_bc_transmit_status(struct rpc_task *task);

static void
call_bc_encode(struct rpc_task *task)
{
	xprt_request_enqueue_transmit(task);
	task->tk_action = call_bc_transmit;
}

/*
 * 5b.	Send the backchannel RPC reply.  On error, drop the reply.  In
 *	addition, disconnect on connectivity errors.
 */
static void
call_bc_transmit(struct rpc_task *task)
{
	task->tk_action = call_bc_transmit_status;
	if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate)) {
		if (!xprt_prepare_transmit(task))
		task->tk_status = 0;
		xprt_transmit(task);
	}
	xprt_end_transmit(task);
}

static void
call_bc_transmit_status(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (rpc_task_transmitted(task))
		task->tk_status = 0;

	dprint_status(task);

	switch (task->tk_status) {
		rpc_delay(task, HZ >> 2);
		task->tk_status = 0;
		task->tk_action = call_bc_transmit;
		/*
		 * Problem reaching the server.  Disconnect and let the
		 * forechannel reestablish the connection.  The server will
		 * have to retransmit the backchannel request and we'll
		 * reprocess it.  Since these ops are idempotent, there's no
		 * need to cache our reply at this time.
		 */
		printk(KERN_NOTICE "RPC: Could not send backchannel reply "
			"error: %d\n", task->tk_status);
		xprt_conditional_disconnect(req->rq_xprt,
			req->rq_connect_cookie);
		/*
		 * We were unable to reply and will have to drop the
		 * request.  The server should reconnect and retransmit.
		 */
		printk(KERN_NOTICE "RPC: Could not send backchannel reply "
			"error: %d\n", task->tk_status);
	}
	task->tk_action = rpc_exit_task;
}
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
/*
 * 6.	Sort out the RPC call status
 */
static void
call_status(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;

	if (!task->tk_msg.rpc_proc->p_proc)
		trace_xprt_ping(task->tk_xprt, task->tk_status);

	dprint_status(task);

	status = task->tk_status;
	task->tk_action = call_decode;

	trace_rpc_call_status(task);
	task->tk_status = 0;
	if (RPC_IS_SOFTCONN(task))
	/*
	 * Delay any retries for 3 seconds, then handle as if it
	 * were a timeout.
	 */
	rpc_delay(task, 3*HZ);
	rpc_force_rebind(clnt);
	rpc_delay(task, 3*HZ);
	/* shutdown or soft timeout */
	if (clnt->cl_chatty)
		printk("%s: RPC call returned error %d\n",
				clnt->cl_program->name, -status);
	task->tk_action = call_encode;
	rpc_check_timeout(task);
	rpc_call_rpcerror(task, status);
}

static bool
rpc_check_connected(const struct rpc_rqst *req)
{
	/* No allocated request or transport? return true */
	if (!req || !req->rq_xprt)
		return true;
	return xprt_connected(req->rq_xprt);
}

static void
rpc_check_timeout(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;

	if (xprt_adjust_timeout(task->tk_rqstp) == 0)

	dprintk("RPC: %5u call_timeout (major)\n", task->tk_pid);
	task->tk_timeouts++;

	if (RPC_IS_SOFTCONN(task) && !rpc_check_connected(task->tk_rqstp)) {
		rpc_call_rpcerror(task, -ETIMEDOUT);
	}

	if (RPC_IS_SOFT(task)) {
		/*
		 * Once a "no retrans timeout" soft task (a.k.a NFSv4) has
		 * been sent, it should time out only if the transport
		 * connection gets terminally broken.
		 */
		if ((task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT) &&
		    rpc_check_connected(task->tk_rqstp))
		if (clnt->cl_chatty) {
			pr_notice_ratelimited(
				"%s: server %s not responding, timed out\n",
				clnt->cl_program->name,
				task->tk_xprt->servername);
		}
		if (task->tk_flags & RPC_TASK_TIMEOUT)
			rpc_call_rpcerror(task, -ETIMEDOUT);
		__rpc_call_rpcerror(task, -EIO, -ETIMEDOUT);
	}

	if (!(task->tk_flags & RPC_CALL_MAJORSEEN)) {
		task->tk_flags |= RPC_CALL_MAJORSEEN;
		if (clnt->cl_chatty) {
			pr_notice_ratelimited(
				"%s: server %s not responding, still trying\n",
				clnt->cl_program->name,
				task->tk_xprt->servername);
		}
	}
	rpc_force_rebind(clnt);
	/*
	 * Did our request time out due to an RPCSEC_GSS out-of-sequence
	 * event? RFC2203 requires the server to drop all such requests.
	 */
	rpcauth_invalcred(task);
}
/*
 * 7.	Decode the RPC reply
 */
static void
call_decode(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rqst *req = task->tk_rqstp;
	struct xdr_stream xdr;

	dprint_status(task);

	if (!task->tk_msg.rpc_proc->p_decode) {
		task->tk_action = rpc_exit_task;
	}

	if (task->tk_flags & RPC_CALL_MAJORSEEN) {
		if (clnt->cl_chatty) {
			pr_notice_ratelimited("%s: server %s OK\n",
					clnt->cl_program->name,
					task->tk_xprt->servername);
		}
		task->tk_flags &= ~RPC_CALL_MAJORSEEN;
	}

	/*
	 * Ensure that we see all writes made by xprt_complete_rqst()
	 * before it changed req->rq_reply_bytes_recvd.
	 */
	req->rq_rcv_buf.len = req->rq_private_buf.len;

	/* Check that the softirq receive buffer is valid */
	WARN_ON(memcmp(&req->rq_rcv_buf, &req->rq_private_buf,
				sizeof(req->rq_rcv_buf)) != 0);

	xdr_init_decode(&xdr, &req->rq_rcv_buf,
			req->rq_rcv_buf.head[0].iov_base, req);
	switch (rpc_decode_header(task, &xdr)) {
		task->tk_action = rpc_exit_task;
		task->tk_status = rpcauth_unwrap_resp(task, &xdr);
		dprintk("RPC: %5u %s result %d\n",
				task->tk_pid, __func__, task->tk_status);
		task->tk_status = 0;
		/* Note: rpc_decode_header() may have freed the RPC slot */
		if (task->tk_rqstp == req) {
			xdr_free_bvec(&req->rq_rcv_buf);
			req->rq_reply_bytes_recvd = 0;
			req->rq_rcv_buf.len = 0;
			if (task->tk_client->cl_discrtry)
				xprt_conditional_disconnect(req->rq_xprt,
						req->rq_connect_cookie);
		}
		task->tk_action = call_encode;
		rpc_check_timeout(task);
	}
}
static int
rpc_encode_header(struct rpc_task *task, struct xdr_stream *xdr)
{
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rqst *req = task->tk_rqstp;

	p = xdr_reserve_space(xdr, RPC_CALLHDRSIZE << 2);
	*p++ = cpu_to_be32(RPC_VERSION);
	*p++ = cpu_to_be32(clnt->cl_prog);
	*p++ = cpu_to_be32(clnt->cl_vers);
	*p = cpu_to_be32(task->tk_msg.rpc_proc->p_proc);

	error = rpcauth_marshcred(task, xdr);

	trace_rpc_bad_callhdr(task);
	rpc_exit(task, error);
}

static int
rpc_decode_header(struct rpc_task *task, struct xdr_stream *xdr)
{
	struct rpc_clnt *clnt = task->tk_client;

	/* RFC-1014 says that the representation of XDR data must be a
	 * multiple of four bytes
	 * - if it isn't pointer subtraction in the NFS client may give
	 */
	if (task->tk_rqstp->rq_rcv_buf.len & 3)
		goto out_unparsable;

	p = xdr_inline_decode(xdr, 3 * sizeof(*p));
		goto out_unparsable;

	if (*p++ != rpc_reply)
		goto out_unparsable;
	if (*p++ != rpc_msg_accepted)
		goto out_msg_denied;

	error = rpcauth_checkverf(task, xdr);

	p = xdr_inline_decode(xdr, sizeof(*p));
		goto out_unparsable;

	case rpc_prog_unavail:
		trace_rpc__prog_unavail(task);
		error = -EPFNOSUPPORT;
	case rpc_prog_mismatch:
		trace_rpc__prog_mismatch(task);
		error = -EPROTONOSUPPORT;
	case rpc_proc_unavail:
		trace_rpc__proc_unavail(task);
		error = -EOPNOTSUPP;
	case rpc_garbage_args:
	case rpc_system_err:
		trace_rpc__garbage_args(task);
		goto out_unparsable;

	clnt->cl_stats->rpcgarbage++;
	if (task->tk_garb_retry) {
		task->tk_garb_retry--;
		task->tk_action = call_encode;
	}
	rpc_exit(task, error);

out_unparsable:
	trace_rpc__unparsable(task);

	trace_rpc_bad_verifier(task);

out_msg_denied:
	p = xdr_inline_decode(xdr, sizeof(*p));
		goto out_unparsable;
	case rpc_auth_error:
		trace_rpc__mismatch(task);
		error = -EPROTONOSUPPORT;
		goto out_unparsable;

	p = xdr_inline_decode(xdr, sizeof(*p));
		goto out_unparsable;
	case rpc_autherr_rejectedcred:
	case rpc_autherr_rejectedverf:
	case rpcsec_gsserr_credproblem:
	case rpcsec_gsserr_ctxproblem:
		if (!task->tk_cred_retry)
		task->tk_cred_retry--;
		trace_rpc__stale_creds(task);
		rpcauth_invalcred(task);
		/* Ensure we obtain a new XID! */
		task->tk_action = call_reserve;
	case rpc_autherr_badcred:
	case rpc_autherr_badverf:
		/* possibly garbled cred/verf? */
		if (!task->tk_garb_retry)
		task->tk_garb_retry--;
		trace_rpc__bad_creds(task);
		task->tk_action = call_encode;
	case rpc_autherr_tooweak:
		trace_rpc__auth_tooweak(task);
		pr_warn("RPC: server %s requires stronger authentication.\n",
			task->tk_xprt->servername);
		goto out_unparsable;
}
static void rpcproc_encode_null(struct rpc_rqst *rqstp, struct xdr_stream *xdr,

static int rpcproc_decode_null(struct rpc_rqst *rqstp, struct xdr_stream *xdr,

static const struct rpc_procinfo rpcproc_null = {
	.p_encode = rpcproc_encode_null,
	.p_decode = rpcproc_decode_null,
};

static int rpc_ping(struct rpc_clnt *clnt)
{
	struct rpc_message msg = {
		.rpc_proc = &rpcproc_null,
	};

	err = rpc_call_sync(clnt, &msg, RPC_TASK_SOFT | RPC_TASK_SOFTCONN |
			    RPC_TASK_NULLCREDS);
}
static
struct rpc_task *rpc_call_null_helper(struct rpc_clnt *clnt,
		struct rpc_xprt *xprt, struct rpc_cred *cred, int flags,
		const struct rpc_call_ops *ops, void *data)
{
	struct rpc_message msg = {
		.rpc_proc = &rpcproc_null,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clnt,
		.rpc_xprt = xprt,
		.rpc_message = &msg,
		.rpc_op_cred = cred,
		.callback_ops = (ops != NULL) ? ops : &rpc_default_ops,
		.callback_data = data,
		.flags = flags | RPC_TASK_NULLCREDS,
	};

	return rpc_run_task(&task_setup_data);
}

struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred, int flags)
{
	return rpc_call_null_helper(clnt, NULL, cred, flags, NULL, NULL);
}
EXPORT_SYMBOL_GPL(rpc_call_null);
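/*
 * Illustrative sketch, not part of the original source: one way a caller
 * might use rpc_call_null() to probe a server with a NULL procedure call.
 * The function name and the origin of "clnt" (e.g. from rpc_create()) are
 * hypothetical; error handling is abbreviated.
 */
static int __maybe_unused example_probe_server(struct rpc_clnt *clnt)
{
	struct rpc_task *task;
	int status;

	/* Without RPC_TASK_ASYNC, rpc_run_task() waits for completion,
	 * so the final status is available once this returns. */
	task = rpc_call_null(clnt, NULL, RPC_TASK_SOFT | RPC_TASK_SOFTCONN);
	if (IS_ERR(task))
		return PTR_ERR(task);
	status = task->tk_status;
	rpc_put_task(task);
	return status;
}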
struct rpc_cb_add_xprt_calldata {
	struct rpc_xprt_switch *xps;
	struct rpc_xprt *xprt;
};

static void rpc_cb_add_xprt_done(struct rpc_task *task, void *calldata)
{
	struct rpc_cb_add_xprt_calldata *data = calldata;

	if (task->tk_status == 0)
		rpc_xprt_switch_add_xprt(data->xps, data->xprt);
}

static void rpc_cb_add_xprt_release(void *calldata)
{
	struct rpc_cb_add_xprt_calldata *data = calldata;

	xprt_put(data->xprt);
	xprt_switch_put(data->xps);
	kfree(data);
}

static const struct rpc_call_ops rpc_cb_add_xprt_call_ops = {
	.rpc_call_done = rpc_cb_add_xprt_done,
	.rpc_release = rpc_cb_add_xprt_release,
};
/**
 * rpc_clnt_test_and_add_xprt - Test and add a new transport to a rpc_clnt
 * @clnt: pointer to struct rpc_clnt
 * @xps: pointer to struct rpc_xprt_switch
 * @xprt: pointer to struct rpc_xprt
 * @dummy: unused
 *
 * Sends an asynchronous NULL call to @xprt; the transport is added to
 * @xps from the rpc_call_done callback only if that call succeeds.
 */
int rpc_clnt_test_and_add_xprt(struct rpc_clnt *clnt,
		struct rpc_xprt_switch *xps, struct rpc_xprt *xprt,
		void *dummy)
{
	struct rpc_cb_add_xprt_calldata *data;
	struct rpc_task *task;

	data = kmalloc(sizeof(*data), GFP_NOFS);
	if (!data)
		return -ENOMEM;
	data->xps = xprt_switch_get(xps);
	data->xprt = xprt_get(xprt);

	task = rpc_call_null_helper(clnt, xprt, NULL,
			RPC_TASK_SOFT | RPC_TASK_SOFTCONN | RPC_TASK_ASYNC | RPC_TASK_NULLCREDS,
			&rpc_cb_add_xprt_call_ops, data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 1;
}
EXPORT_SYMBOL_GPL(rpc_clnt_test_and_add_xprt);
/**
 * rpc_clnt_setup_test_and_add_xprt()
 *
 * This is an rpc_clnt_add_xprt setup() function which returns 1 so:
 *   1) the caller of the test function must dereference the rpc_xprt_switch
 *      and the rpc_xprt.
 *   2) the test function must call rpc_xprt_switch_add_xprt, usually in
 *      its rpc_call_done routine.
 *
 * Upon success (return of 1), the test function adds the new
 * transport to the rpc_clnt xprt switch.
 *
 * @clnt: struct rpc_clnt to get the new transport
 * @xps:  the rpc_xprt_switch to hold the new transport
 * @xprt: the rpc_xprt to test
 * @data: a struct rpc_add_xprt_test pointer that holds the test function
 *        and test function call data
 */
int rpc_clnt_setup_test_and_add_xprt(struct rpc_clnt *clnt,
				     struct rpc_xprt_switch *xps,
				     struct rpc_xprt *xprt,
				     void *data)
{
	struct rpc_task *task;
	struct rpc_add_xprt_test *xtest = (struct rpc_add_xprt_test *)data;
	int status = -EADDRINUSE;

	xprt = xprt_get(xprt);
	xprt_switch_get(xps);

	if (rpc_xprt_switch_has_addr(xps, (struct sockaddr *)&xprt->addr))
		goto out_err;

	/* Test the connection */
	task = rpc_call_null_helper(clnt, xprt, NULL,
				    RPC_TASK_SOFT | RPC_TASK_SOFTCONN | RPC_TASK_NULLCREDS,
				    NULL, NULL);
	if (IS_ERR(task)) {
		status = PTR_ERR(task);
		goto out_err;
	}
	status = task->tk_status;
	rpc_put_task(task);

	if (status < 0)
		goto out_err;

	/* rpc_xprt_switch and rpc_xprt are dereferenced by add_xprt_test() */
	xtest->add_xprt_test(clnt, xprt, xtest->data);

	xprt_put(xprt);
	xprt_switch_put(xps);

	/* so that rpc_clnt_add_xprt does not call rpc_xprt_switch_add_xprt */
	return 1;
out_err:
	pr_info("RPC: rpc_clnt_test_xprt failed: %d addr %s not added\n",
		status, xprt->address_strings[RPC_DISPLAY_ADDR]);
	xprt_put(xprt);
	xprt_switch_put(xps);
	return status;
}
EXPORT_SYMBOL_GPL(rpc_clnt_setup_test_and_add_xprt);
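/*
 * Illustrative sketch, not part of the original source: pairing
 * rpc_clnt_setup_test_and_add_xprt() with rpc_clnt_add_xprt() (below).
 * Both example function names and the trivial test callback are invented;
 * a real caller (e.g. NFSv4 session trunking) would verify the server's
 * identity before adding the transport.
 */
static void __maybe_unused example_trunk_test(struct rpc_clnt *clnt,
					      struct rpc_xprt *xprt,
					      void *calldata)
{
	/* Because the setup function returned 1, adding the transport to
	 * the client's switch is this callback's responsibility. */
	rpc_clnt_xprt_switch_add_xprt(clnt, xprt);
}

static int __maybe_unused example_add_tested_transport(struct rpc_clnt *clnt,
							struct xprt_create *args)
{
	struct rpc_add_xprt_test xtest = {
		.add_xprt_test	= example_trunk_test,
		.data		= NULL,
	};

	return rpc_clnt_add_xprt(clnt, args,
				 rpc_clnt_setup_test_and_add_xprt, &xtest);
}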
/**
 * rpc_clnt_add_xprt - Add a new transport to a rpc_clnt
 * @clnt: pointer to struct rpc_clnt
 * @xprtargs: pointer to struct xprt_create
 * @setup: callback to test and/or set up the connection
 * @data: pointer to setup function data
 *
 * Creates a new transport using the parameters set in @xprtargs and
 * adds it to @clnt.  If a @setup callback is provided, it is invoked to
 * test the connection and/or configure the new transport before the
 * transport is added.
 */
int rpc_clnt_add_xprt(struct rpc_clnt *clnt,
		struct xprt_create *xprtargs,
		int (*setup)(struct rpc_clnt *,
			struct rpc_xprt_switch *,
			struct rpc_xprt *,
			void *),
		void *data)
{
	struct rpc_xprt_switch *xps;
	struct rpc_xprt *xprt;
	unsigned long connect_timeout;
	unsigned long reconnect_timeout;
	unsigned char resvport;
	int ret = 0;

	rcu_read_lock();
	xps = xprt_switch_get(rcu_dereference(clnt->cl_xpi.xpi_xpswitch));
	xprt = xprt_iter_xprt(&clnt->cl_xpi);
	if (xps == NULL || xprt == NULL) {
		rcu_read_unlock();
		xprt_switch_put(xps);
		return -EAGAIN;
	}
	resvport = xprt->resvport;
	connect_timeout = xprt->connect_timeout;
	reconnect_timeout = xprt->max_reconnect_timeout;
	rcu_read_unlock();

	xprt = xprt_create_transport(xprtargs);
	if (IS_ERR(xprt)) {
		ret = PTR_ERR(xprt);
		goto out_put_switch;
	}
	xprt->resvport = resvport;
	if (xprt->ops->set_connect_timeout != NULL)
		xprt->ops->set_connect_timeout(xprt,
				connect_timeout,
				reconnect_timeout);

	rpc_xprt_switch_set_roundrobin(xps);
	if (setup) {
		ret = setup(clnt, xps, xprt, data);
		if (ret != 0)
			goto out_put_xprt;
	}
	rpc_xprt_switch_add_xprt(xps, xprt);
out_put_xprt:
	xprt_put(xprt);
out_put_switch:
	xprt_switch_put(xps);
	return ret;
}
EXPORT_SYMBOL_GPL(rpc_clnt_add_xprt);
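/*
 * Illustrative sketch, not part of the original source: how a hypothetical
 * caller might attach an additional transport to an existing client, using
 * rpc_clnt_test_and_add_xprt() above as the setup callback so the transport
 * is only added once a NULL call to it succeeds.  The function name and the
 * "sap"/"salen"/"hostname" parameters are invented for the example.
 */
static int __maybe_unused example_add_transport(struct rpc_clnt *clnt,
						struct sockaddr *sap,
						size_t salen,
						const char *hostname)
{
	struct xprt_create xprtargs = {
		.ident		= XPRT_TRANSPORT_TCP,
		.net		= rpc_net_ns(clnt),
		.dstaddr	= sap,
		.addrlen	= salen,
		.servername	= hostname,
	};

	return rpc_clnt_add_xprt(clnt, &xprtargs,
				 rpc_clnt_test_and_add_xprt, NULL);
}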
struct connect_timeout_data {
	unsigned long connect_timeout;
	unsigned long reconnect_timeout;
};

static int
rpc_xprt_set_connect_timeout(struct rpc_clnt *clnt,
		struct rpc_xprt *xprt,
		void *data)
{
	struct connect_timeout_data *timeo = data;

	if (xprt->ops->set_connect_timeout)
		xprt->ops->set_connect_timeout(xprt,
				timeo->connect_timeout,
				timeo->reconnect_timeout);
	return 0;
}

void
rpc_set_connect_timeout(struct rpc_clnt *clnt,
		unsigned long connect_timeout,
		unsigned long reconnect_timeout)
{
	struct connect_timeout_data timeout = {
		.connect_timeout = connect_timeout,
		.reconnect_timeout = reconnect_timeout,
	};
	rpc_clnt_iterate_for_each_xprt(clnt,
			rpc_xprt_set_connect_timeout,
			&timeout);
}
EXPORT_SYMBOL_GPL(rpc_set_connect_timeout);
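/*
 * Illustrative sketch, not part of the original source: a hypothetical
 * caller tightening the connect timeouts on every transport of a client.
 * The function name is invented; both values are in jiffies.
 */
static void __maybe_unused example_tune_connect_timeouts(struct rpc_clnt *clnt)
{
	/* Give up connection attempts after 15 seconds and cap the
	 * reconnect back-off at 60 seconds. */
	rpc_set_connect_timeout(clnt, 15 * HZ, 60 * HZ);
}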
void rpc_clnt_xprt_switch_put(struct rpc_clnt *clnt)
{
	rcu_read_lock();
	xprt_switch_put(rcu_dereference(clnt->cl_xpi.xpi_xpswitch));
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_put);

void rpc_clnt_xprt_switch_add_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt)
{
	rcu_read_lock();
	rpc_xprt_switch_add_xprt(rcu_dereference(clnt->cl_xpi.xpi_xpswitch),
				 xprt);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_add_xprt);

bool rpc_clnt_xprt_switch_has_addr(struct rpc_clnt *clnt,
				   const struct sockaddr *sap)
{
	struct rpc_xprt_switch *xps;
	bool ret;

	rcu_read_lock();
	xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch);
	ret = rpc_xprt_switch_has_addr(xps, sap);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_has_addr);
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
static void rpc_show_header(void)
{
	printk(KERN_INFO "-pid- flgs status -client- --rqstp- "
		"-timeout ---ops--\n");
}

static void rpc_show_task(const struct rpc_clnt *clnt,
			  const struct rpc_task *task)
{
	const char *rpc_waitq = "none";

	if (RPC_IS_QUEUED(task))
		rpc_waitq = rpc_qname(task->tk_waitqueue);

	printk(KERN_INFO "%5u %04x %6d %8p %8p %8ld %8p %sv%u %s a:%ps q:%s\n",
		task->tk_pid, task->tk_flags, task->tk_status,
		clnt, task->tk_rqstp, rpc_task_timeout(task), task->tk_ops,
		clnt->cl_program->name, clnt->cl_vers, rpc_proc_name(task),
		task->tk_action, rpc_waitq);
}

void rpc_show_tasks(struct net *net)
{
	struct rpc_clnt *clnt;
	struct rpc_task *task;
	int header = 0;
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

	spin_lock(&sn->rpc_client_lock);
	list_for_each_entry(clnt, &sn->all_clients, cl_clients) {
		spin_lock(&clnt->cl_lock);
		list_for_each_entry(task, &clnt->cl_tasks, tk_task) {
			if (!header) {
				rpc_show_header();
				header++;
			}
			rpc_show_task(clnt, task);
		}
		spin_unlock(&clnt->cl_lock);
	}
	spin_unlock(&sn->rpc_client_lock);
}
#endif
#if IS_ENABLED(CONFIG_SUNRPC_SWAP)
static int
rpc_clnt_swap_activate_callback(struct rpc_clnt *clnt,
		struct rpc_xprt *xprt,
		void *dummy)
{
	return xprt_enable_swap(xprt);
}

int
rpc_clnt_swap_activate(struct rpc_clnt *clnt)
{
	if (atomic_inc_return(&clnt->cl_swapper) == 1)
		return rpc_clnt_iterate_for_each_xprt(clnt,
				rpc_clnt_swap_activate_callback, NULL);
	return 0;
}
EXPORT_SYMBOL_GPL(rpc_clnt_swap_activate);

static int
rpc_clnt_swap_deactivate_callback(struct rpc_clnt *clnt,
		struct rpc_xprt *xprt,
		void *dummy)
{
	xprt_disable_swap(xprt);
	return 0;
}

void
rpc_clnt_swap_deactivate(struct rpc_clnt *clnt)
{
	if (atomic_dec_if_positive(&clnt->cl_swapper) == 0)
		rpc_clnt_iterate_for_each_xprt(clnt,
				rpc_clnt_swap_deactivate_callback, NULL);
}
EXPORT_SYMBOL_GPL(rpc_clnt_swap_deactivate);
#endif /* CONFIG_SUNRPC_SWAP */