// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/net/sunrpc/clnt.c
 *
 *  This file contains the high-level RPC interface.
 *  It is modeled as a finite state machine to support both synchronous
 *  and asynchronous requests.
 *
 *  -	RPC header generation and argument serialization.
 *  -	Credential refresh.
 *  -	TCP connect handling.
 *  -	Retry of operation when it is suspected the operation failed because
 *	of uid squashing on the server, or when the credentials were stale
 *	and need to be refreshed, or when a packet was damaged in transit.
 *	This may have to be moved to the VFS layer.
 *
 *  Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com>
 *  Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de>
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kallsyms.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/utsname.h>
#include <linux/workqueue.h>
#include <linux/in6.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include <linux/sunrpc/metrics.h>
#include <linux/sunrpc/bc_xprt.h>
#include <trace/events/sunrpc.h>
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_CALL
#endif

#define dprint_status(t)					\
	dprintk("RPC: %5u %s (status %d)\n", t->tk_pid,		\
			__func__, t->tk_status)
/*
 * All RPC clients are linked into this list
 */

static DECLARE_WAIT_QUEUE_HEAD(destroy_wait);
static void	call_start(struct rpc_task *task);
static void	call_reserve(struct rpc_task *task);
static void	call_reserveresult(struct rpc_task *task);
static void	call_allocate(struct rpc_task *task);
static void	call_encode(struct rpc_task *task);
static void	call_decode(struct rpc_task *task);
static void	call_bind(struct rpc_task *task);
static void	call_bind_status(struct rpc_task *task);
static void	call_transmit(struct rpc_task *task);
static void	call_status(struct rpc_task *task);
static void	call_transmit_status(struct rpc_task *task);
static void	call_refresh(struct rpc_task *task);
static void	call_refreshresult(struct rpc_task *task);
static void	call_connect(struct rpc_task *task);
static void	call_connect_status(struct rpc_task *task);

static int	rpc_encode_header(struct rpc_task *task,
				  struct xdr_stream *xdr);
static int	rpc_decode_header(struct rpc_task *task,
				  struct xdr_stream *xdr);
static int	rpc_ping(struct rpc_clnt *clnt);
static void	rpc_check_timeout(struct rpc_task *task);
static void rpc_register_client(struct rpc_clnt *clnt)
{
	struct net *net = rpc_net_ns(clnt);
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

	spin_lock(&sn->rpc_client_lock);
	list_add(&clnt->cl_clients, &sn->all_clients);
	spin_unlock(&sn->rpc_client_lock);
}
static void rpc_unregister_client(struct rpc_clnt *clnt)
{
	struct net *net = rpc_net_ns(clnt);
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

	spin_lock(&sn->rpc_client_lock);
	list_del(&clnt->cl_clients);
	spin_unlock(&sn->rpc_client_lock);
}
static void __rpc_clnt_remove_pipedir(struct rpc_clnt *clnt)
{
	rpc_remove_client_dir(clnt);
}

static void rpc_clnt_remove_pipedir(struct rpc_clnt *clnt)
{
	struct net *net = rpc_net_ns(clnt);
	struct super_block *pipefs_sb;

	pipefs_sb = rpc_get_sb_net(net);
	if (pipefs_sb) {
		__rpc_clnt_remove_pipedir(clnt);
		rpc_put_sb_net(net);
	}
}
static struct dentry *rpc_setup_pipedir_sb(struct super_block *sb,
					   struct rpc_clnt *clnt)
{
	static uint32_t clntid;
	const char *dir_name = clnt->cl_program->pipe_dir_name;
	char name[15];
	struct dentry *dir, *dentry;

	dir = rpc_d_lookup_sb(sb, dir_name);
	if (dir == NULL) {
		pr_info("RPC: pipefs directory doesn't exist: %s\n", dir_name);
		return dir;
	}
	for (;;) {
		snprintf(name, sizeof(name), "clnt%x", (unsigned int)clntid++);
		name[sizeof(name) - 1] = '\0';
		dentry = rpc_create_client_dir(dir, name, clnt);
		if (!IS_ERR(dentry))
			break;
		if (dentry == ERR_PTR(-EEXIST))
			continue;
		printk(KERN_INFO "RPC: Couldn't create pipefs entry"
				" %s/%s, error %ld\n",
				dir_name, name, PTR_ERR(dentry));
		break;
	}
	dput(dir);
	return dentry;
}
static int
rpc_setup_pipedir(struct super_block *pipefs_sb, struct rpc_clnt *clnt)
{
	struct dentry *dentry;

	if (clnt->cl_program->pipe_dir_name != NULL) {
		dentry = rpc_setup_pipedir_sb(pipefs_sb, clnt);
		if (IS_ERR(dentry))
			return PTR_ERR(dentry);
	}
	return 0;
}
164 static int rpc_clnt_skip_event(struct rpc_clnt
*clnt
, unsigned long event
)
166 if (clnt
->cl_program
->pipe_dir_name
== NULL
)
170 case RPC_PIPEFS_MOUNT
:
171 if (clnt
->cl_pipedir_objects
.pdh_dentry
!= NULL
)
173 if (atomic_read(&clnt
->cl_count
) == 0)
176 case RPC_PIPEFS_UMOUNT
:
177 if (clnt
->cl_pipedir_objects
.pdh_dentry
== NULL
)
184 static int __rpc_clnt_handle_event(struct rpc_clnt
*clnt
, unsigned long event
,
185 struct super_block
*sb
)
187 struct dentry
*dentry
;
190 case RPC_PIPEFS_MOUNT
:
191 dentry
= rpc_setup_pipedir_sb(sb
, clnt
);
195 return PTR_ERR(dentry
);
197 case RPC_PIPEFS_UMOUNT
:
198 __rpc_clnt_remove_pipedir(clnt
);
201 printk(KERN_ERR
"%s: unknown event: %ld\n", __func__
, event
);
207 static int __rpc_pipefs_event(struct rpc_clnt
*clnt
, unsigned long event
,
208 struct super_block
*sb
)
212 for (;; clnt
= clnt
->cl_parent
) {
213 if (!rpc_clnt_skip_event(clnt
, event
))
214 error
= __rpc_clnt_handle_event(clnt
, event
, sb
);
215 if (error
|| clnt
== clnt
->cl_parent
)
221 static struct rpc_clnt
*rpc_get_client_for_event(struct net
*net
, int event
)
223 struct sunrpc_net
*sn
= net_generic(net
, sunrpc_net_id
);
224 struct rpc_clnt
*clnt
;
226 spin_lock(&sn
->rpc_client_lock
);
227 list_for_each_entry(clnt
, &sn
->all_clients
, cl_clients
) {
228 if (rpc_clnt_skip_event(clnt
, event
))
230 spin_unlock(&sn
->rpc_client_lock
);
233 spin_unlock(&sn
->rpc_client_lock
);
237 static int rpc_pipefs_event(struct notifier_block
*nb
, unsigned long event
,
240 struct super_block
*sb
= ptr
;
241 struct rpc_clnt
*clnt
;
244 while ((clnt
= rpc_get_client_for_event(sb
->s_fs_info
, event
))) {
245 error
= __rpc_pipefs_event(clnt
, event
, sb
);
static struct notifier_block rpc_clients_block = {
	.notifier_call	= rpc_pipefs_event,
	.priority	= SUNRPC_PIPEFS_RPC_PRIO,
};

int rpc_clients_notifier_register(void)
{
	return rpc_pipefs_notifier_register(&rpc_clients_block);
}

void rpc_clients_notifier_unregister(void)
{
	return rpc_pipefs_notifier_unregister(&rpc_clients_block);
}
static struct rpc_xprt *rpc_clnt_set_transport(struct rpc_clnt *clnt,
		struct rpc_xprt *xprt,
		const struct rpc_timeout *timeout)
{
	struct rpc_xprt *old;

	spin_lock(&clnt->cl_lock);
	old = rcu_dereference_protected(clnt->cl_xprt,
			lockdep_is_held(&clnt->cl_lock));

	if (!xprt_bound(xprt))
		clnt->cl_autobind = 1;

	clnt->cl_timeout = timeout;
	rcu_assign_pointer(clnt->cl_xprt, xprt);
	spin_unlock(&clnt->cl_lock);

	return old;
}
static void rpc_clnt_set_nodename(struct rpc_clnt *clnt, const char *nodename)
{
	clnt->cl_nodelen = strlcpy(clnt->cl_nodename,
			nodename, sizeof(clnt->cl_nodename));
}
293 static int rpc_client_register(struct rpc_clnt
*clnt
,
294 rpc_authflavor_t pseudoflavor
,
295 const char *client_name
)
297 struct rpc_auth_create_args auth_args
= {
298 .pseudoflavor
= pseudoflavor
,
299 .target_name
= client_name
,
301 struct rpc_auth
*auth
;
302 struct net
*net
= rpc_net_ns(clnt
);
303 struct super_block
*pipefs_sb
;
306 rpc_clnt_debugfs_register(clnt
);
308 pipefs_sb
= rpc_get_sb_net(net
);
310 err
= rpc_setup_pipedir(pipefs_sb
, clnt
);
315 rpc_register_client(clnt
);
319 auth
= rpcauth_create(&auth_args
, clnt
);
321 dprintk("RPC: Couldn't create auth handle (flavor %u)\n",
328 pipefs_sb
= rpc_get_sb_net(net
);
329 rpc_unregister_client(clnt
);
330 __rpc_clnt_remove_pipedir(clnt
);
334 rpc_clnt_debugfs_unregister(clnt
);
static DEFINE_IDA(rpc_clids);

void rpc_cleanup_clids(void)
{
	ida_destroy(&rpc_clids);
}

static int rpc_alloc_clid(struct rpc_clnt *clnt)
{
	int clid;

	clid = ida_simple_get(&rpc_clids, 0, 0, GFP_KERNEL);
	if (clid < 0)
		return clid;
	clnt->cl_clid = clid;
	return 0;
}

static void rpc_free_clid(struct rpc_clnt *clnt)
{
	ida_simple_remove(&rpc_clids, clnt->cl_clid);
}
361 static struct rpc_clnt
* rpc_new_client(const struct rpc_create_args
*args
,
362 struct rpc_xprt_switch
*xps
,
363 struct rpc_xprt
*xprt
,
364 struct rpc_clnt
*parent
)
366 const struct rpc_program
*program
= args
->program
;
367 const struct rpc_version
*version
;
368 struct rpc_clnt
*clnt
= NULL
;
369 const struct rpc_timeout
*timeout
;
370 const char *nodename
= args
->nodename
;
373 /* sanity check the name before trying to print it */
374 dprintk("RPC: creating %s client for %s (xprt %p)\n",
375 program
->name
, args
->servername
, xprt
);
382 if (args
->version
>= program
->nrvers
)
384 version
= program
->version
[args
->version
];
389 clnt
= kzalloc(sizeof(*clnt
), GFP_KERNEL
);
392 clnt
->cl_parent
= parent
? : clnt
;
394 err
= rpc_alloc_clid(clnt
);
398 clnt
->cl_cred
= get_cred(args
->cred
);
399 clnt
->cl_procinfo
= version
->procs
;
400 clnt
->cl_maxproc
= version
->nrprocs
;
401 clnt
->cl_prog
= args
->prognumber
? : program
->number
;
402 clnt
->cl_vers
= version
->number
;
403 clnt
->cl_stats
= program
->stats
;
404 clnt
->cl_metrics
= rpc_alloc_iostats(clnt
);
405 rpc_init_pipe_dir_head(&clnt
->cl_pipedir_objects
);
407 if (clnt
->cl_metrics
== NULL
)
409 clnt
->cl_program
= program
;
410 INIT_LIST_HEAD(&clnt
->cl_tasks
);
411 spin_lock_init(&clnt
->cl_lock
);
413 timeout
= xprt
->timeout
;
414 if (args
->timeout
!= NULL
) {
415 memcpy(&clnt
->cl_timeout_default
, args
->timeout
,
416 sizeof(clnt
->cl_timeout_default
));
417 timeout
= &clnt
->cl_timeout_default
;
420 rpc_clnt_set_transport(clnt
, xprt
, timeout
);
421 xprt_iter_init(&clnt
->cl_xpi
, xps
);
422 xprt_switch_put(xps
);
424 clnt
->cl_rtt
= &clnt
->cl_rtt_default
;
425 rpc_init_rtt(&clnt
->cl_rtt_default
, clnt
->cl_timeout
->to_initval
);
427 atomic_set(&clnt
->cl_count
, 1);
429 if (nodename
== NULL
)
430 nodename
= utsname()->nodename
;
431 /* save the nodename */
432 rpc_clnt_set_nodename(clnt
, nodename
);
434 err
= rpc_client_register(clnt
, args
->authflavor
, args
->client_name
);
438 atomic_inc(&parent
->cl_count
);
442 rpc_free_iostats(clnt
->cl_metrics
);
444 put_cred(clnt
->cl_cred
);
451 xprt_switch_put(xps
);
456 static struct rpc_clnt
*rpc_create_xprt(struct rpc_create_args
*args
,
457 struct rpc_xprt
*xprt
)
459 struct rpc_clnt
*clnt
= NULL
;
460 struct rpc_xprt_switch
*xps
;
462 if (args
->bc_xprt
&& args
->bc_xprt
->xpt_bc_xps
) {
463 WARN_ON_ONCE(!(args
->protocol
& XPRT_TRANSPORT_BC
));
464 xps
= args
->bc_xprt
->xpt_bc_xps
;
465 xprt_switch_get(xps
);
467 xps
= xprt_switch_alloc(xprt
, GFP_KERNEL
);
470 return ERR_PTR(-ENOMEM
);
473 xprt_switch_get(xps
);
474 xprt
->bc_xprt
->xpt_bc_xps
= xps
;
477 clnt
= rpc_new_client(args
, xps
, xprt
, NULL
);
481 if (!(args
->flags
& RPC_CLNT_CREATE_NOPING
)) {
482 int err
= rpc_ping(clnt
);
484 rpc_shutdown_client(clnt
);
489 clnt
->cl_softrtry
= 1;
490 if (args
->flags
& (RPC_CLNT_CREATE_HARDRTRY
|RPC_CLNT_CREATE_SOFTERR
)) {
491 clnt
->cl_softrtry
= 0;
492 if (args
->flags
& RPC_CLNT_CREATE_SOFTERR
)
493 clnt
->cl_softerr
= 1;
496 if (args
->flags
& RPC_CLNT_CREATE_AUTOBIND
)
497 clnt
->cl_autobind
= 1;
498 if (args
->flags
& RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT
)
499 clnt
->cl_noretranstimeo
= 1;
500 if (args
->flags
& RPC_CLNT_CREATE_DISCRTRY
)
501 clnt
->cl_discrtry
= 1;
502 if (!(args
->flags
& RPC_CLNT_CREATE_QUIET
))
/**
 * rpc_create - create an RPC client and transport with one call
 * @args: rpc_clnt create argument structure
 *
 * Creates and initializes an RPC transport and an RPC client.
 *
 * It can ping the server in order to determine if it is up, and to see if
 * it supports this program and version.  RPC_CLNT_CREATE_NOPING disables
 * this behavior so asynchronous tasks can also use rpc_create.
 */
518 struct rpc_clnt
*rpc_create(struct rpc_create_args
*args
)
520 struct rpc_xprt
*xprt
;
521 struct xprt_create xprtargs
= {
523 .ident
= args
->protocol
,
524 .srcaddr
= args
->saddress
,
525 .dstaddr
= args
->address
,
526 .addrlen
= args
->addrsize
,
527 .servername
= args
->servername
,
528 .bc_xprt
= args
->bc_xprt
,
531 struct rpc_clnt
*clnt
;
535 WARN_ON_ONCE(!(args
->protocol
& XPRT_TRANSPORT_BC
));
536 xprt
= args
->bc_xprt
->xpt_bc_xprt
;
539 return rpc_create_xprt(args
, xprt
);
543 if (args
->flags
& RPC_CLNT_CREATE_INFINITE_SLOTS
)
544 xprtargs
.flags
|= XPRT_CREATE_INFINITE_SLOTS
;
545 if (args
->flags
& RPC_CLNT_CREATE_NO_IDLE_TIMEOUT
)
546 xprtargs
.flags
|= XPRT_CREATE_NO_IDLE_TIMEOUT
;
548 * If the caller chooses not to specify a hostname, whip
549 * up a string representation of the passed-in address.
551 if (xprtargs
.servername
== NULL
) {
552 struct sockaddr_un
*sun
=
553 (struct sockaddr_un
*)args
->address
;
554 struct sockaddr_in
*sin
=
555 (struct sockaddr_in
*)args
->address
;
556 struct sockaddr_in6
*sin6
=
557 (struct sockaddr_in6
*)args
->address
;
559 servername
[0] = '\0';
560 switch (args
->address
->sa_family
) {
562 snprintf(servername
, sizeof(servername
), "%s",
566 snprintf(servername
, sizeof(servername
), "%pI4",
567 &sin
->sin_addr
.s_addr
);
570 snprintf(servername
, sizeof(servername
), "%pI6",
574 /* caller wants default server name, but
575 * address family isn't recognized. */
576 return ERR_PTR(-EINVAL
);
578 xprtargs
.servername
= servername
;
581 xprt
= xprt_create_transport(&xprtargs
);
583 return (struct rpc_clnt
*)xprt
;
586 * By default, kernel RPC client connects from a reserved port.
587 * CAP_NET_BIND_SERVICE will not be set for unprivileged requesters,
588 * but it is always enabled for rpciod, which handles the connect
592 if (args
->flags
& RPC_CLNT_CREATE_NONPRIVPORT
)
595 if (args
->flags
& RPC_CLNT_CREATE_REUSEPORT
)
598 clnt
= rpc_create_xprt(args
, xprt
);
599 if (IS_ERR(clnt
) || args
->nconnect
<= 1)
602 for (i
= 0; i
< args
->nconnect
- 1; i
++) {
603 if (rpc_clnt_add_xprt(clnt
, &xprtargs
, NULL
, NULL
) < 0)
608 EXPORT_SYMBOL_GPL(rpc_create
);
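/*
 * Illustrative sketch (not part of the original source): a minimal example
 * of how an upper layer might fill in rpc_create_args and call rpc_create().
 * The helper name, the program table parameter, and the server name string
 * are hypothetical placeholders; only the rpc_create_args fields and flags
 * already referenced in this file are assumed.
 */
static inline struct rpc_clnt *example_create_client(struct net *net,
				struct sockaddr *sap, size_t salen,
				const struct rpc_program *example_program)
{
	struct rpc_create_args args = {
		.net		= net,
		.protocol	= XPRT_TRANSPORT_TCP,
		.address	= sap,
		.addrsize	= salen,
		.servername	= "example-server",
		.program	= example_program,
		.version	= 0,
		.authflavor	= RPC_AUTH_UNIX,
		.flags		= RPC_CLNT_CREATE_NOPING,
	};

	/* Returns a valid rpc_clnt pointer or an ERR_PTR() on failure;
	 * the caller releases it later with rpc_shutdown_client(). */
	return rpc_create(&args);
}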
611 * This function clones the RPC client structure. It allows us to share the
612 * same transport while varying parameters such as the authentication
615 static struct rpc_clnt
*__rpc_clone_client(struct rpc_create_args
*args
,
616 struct rpc_clnt
*clnt
)
618 struct rpc_xprt_switch
*xps
;
619 struct rpc_xprt
*xprt
;
620 struct rpc_clnt
*new;
625 xprt
= xprt_get(rcu_dereference(clnt
->cl_xprt
));
626 xps
= xprt_switch_get(rcu_dereference(clnt
->cl_xpi
.xpi_xpswitch
));
628 if (xprt
== NULL
|| xps
== NULL
) {
630 xprt_switch_put(xps
);
633 args
->servername
= xprt
->servername
;
634 args
->nodename
= clnt
->cl_nodename
;
636 new = rpc_new_client(args
, xps
, xprt
, clnt
);
642 /* Turn off autobind on clones */
643 new->cl_autobind
= 0;
644 new->cl_softrtry
= clnt
->cl_softrtry
;
645 new->cl_softerr
= clnt
->cl_softerr
;
646 new->cl_noretranstimeo
= clnt
->cl_noretranstimeo
;
647 new->cl_discrtry
= clnt
->cl_discrtry
;
648 new->cl_chatty
= clnt
->cl_chatty
;
649 new->cl_principal
= clnt
->cl_principal
;
653 dprintk("RPC: %s: returned error %d\n", __func__
, err
);
/**
 * rpc_clone_client - Clone an RPC client structure
 *
 * @clnt: RPC client whose parameters are copied
 *
 * Returns a fresh RPC client or an ERR_PTR.
 */
664 struct rpc_clnt
*rpc_clone_client(struct rpc_clnt
*clnt
)
666 struct rpc_create_args args
= {
667 .program
= clnt
->cl_program
,
668 .prognumber
= clnt
->cl_prog
,
669 .version
= clnt
->cl_vers
,
670 .authflavor
= clnt
->cl_auth
->au_flavor
,
671 .cred
= clnt
->cl_cred
,
673 return __rpc_clone_client(&args
, clnt
);
675 EXPORT_SYMBOL_GPL(rpc_clone_client
);
/**
 * rpc_clone_client_set_auth - Clone an RPC client structure and set its auth
 *
 * @clnt: RPC client whose parameters are copied
 * @flavor: security flavor for new client
 *
 * Returns a fresh RPC client or an ERR_PTR.
 */
686 rpc_clone_client_set_auth(struct rpc_clnt
*clnt
, rpc_authflavor_t flavor
)
688 struct rpc_create_args args
= {
689 .program
= clnt
->cl_program
,
690 .prognumber
= clnt
->cl_prog
,
691 .version
= clnt
->cl_vers
,
692 .authflavor
= flavor
,
693 .cred
= clnt
->cl_cred
,
695 return __rpc_clone_client(&args
, clnt
);
697 EXPORT_SYMBOL_GPL(rpc_clone_client_set_auth
);
/**
 * rpc_switch_client_transport: switch the RPC transport on the fly
 * @clnt: pointer to a struct rpc_clnt
 * @args: pointer to the new transport arguments
 * @timeout: pointer to the new timeout parameters
 *
 * This function allows the caller to switch the RPC transport for the
 * rpc_clnt structure 'clnt' to allow it to connect to a mirrored NFS
 * server, for instance.  It assumes that the caller has ensured that
 * there are no active RPC tasks by using some form of locking.
 *
 * Returns zero if "clnt" is now using the new xprt.  Otherwise a
 * negative errno is returned, and "clnt" continues to use the old
 * xprt.
 */
714 int rpc_switch_client_transport(struct rpc_clnt
*clnt
,
715 struct xprt_create
*args
,
716 const struct rpc_timeout
*timeout
)
718 const struct rpc_timeout
*old_timeo
;
719 rpc_authflavor_t pseudoflavor
;
720 struct rpc_xprt_switch
*xps
, *oldxps
;
721 struct rpc_xprt
*xprt
, *old
;
722 struct rpc_clnt
*parent
;
725 xprt
= xprt_create_transport(args
);
727 dprintk("RPC: failed to create new xprt for clnt %p\n",
729 return PTR_ERR(xprt
);
732 xps
= xprt_switch_alloc(xprt
, GFP_KERNEL
);
738 pseudoflavor
= clnt
->cl_auth
->au_flavor
;
740 old_timeo
= clnt
->cl_timeout
;
741 old
= rpc_clnt_set_transport(clnt
, xprt
, timeout
);
742 oldxps
= xprt_iter_xchg_switch(&clnt
->cl_xpi
, xps
);
744 rpc_unregister_client(clnt
);
745 __rpc_clnt_remove_pipedir(clnt
);
746 rpc_clnt_debugfs_unregister(clnt
);
749 * A new transport was created. "clnt" therefore
750 * becomes the root of a new cl_parent tree. clnt's
751 * children, if it has any, still point to the old xprt.
753 parent
= clnt
->cl_parent
;
754 clnt
->cl_parent
= clnt
;
757 * The old rpc_auth cache cannot be re-used. GSS
758 * contexts in particular are between a single
761 err
= rpc_client_register(clnt
, pseudoflavor
, NULL
);
767 rpc_release_client(parent
);
768 xprt_switch_put(oldxps
);
770 dprintk("RPC: replaced xprt for clnt %p\n", clnt
);
774 xps
= xprt_iter_xchg_switch(&clnt
->cl_xpi
, oldxps
);
775 rpc_clnt_set_transport(clnt
, old
, old_timeo
);
776 clnt
->cl_parent
= parent
;
777 rpc_client_register(clnt
, pseudoflavor
, NULL
);
778 xprt_switch_put(xps
);
780 dprintk("RPC: failed to switch xprt for clnt %p\n", clnt
);
783 EXPORT_SYMBOL_GPL(rpc_switch_client_transport
);
786 int rpc_clnt_xprt_iter_init(struct rpc_clnt
*clnt
, struct rpc_xprt_iter
*xpi
)
788 struct rpc_xprt_switch
*xps
;
791 xps
= xprt_switch_get(rcu_dereference(clnt
->cl_xpi
.xpi_xpswitch
));
795 xprt_iter_init_listall(xpi
, xps
);
796 xprt_switch_put(xps
);
/**
 * rpc_clnt_iterate_for_each_xprt - Apply a function to all transports
 * @clnt: pointer to client
 * @fn: function to apply
 * @data: void pointer to function data
 *
 * Iterates through the list of RPC transports currently attached to the
 * client and applies the function fn(clnt, xprt, data).
 *
 * On error, the iteration stops, and the function returns the error value.
 */
811 int rpc_clnt_iterate_for_each_xprt(struct rpc_clnt
*clnt
,
812 int (*fn
)(struct rpc_clnt
*, struct rpc_xprt
*, void *),
815 struct rpc_xprt_iter xpi
;
818 ret
= rpc_clnt_xprt_iter_init(clnt
, &xpi
);
822 struct rpc_xprt
*xprt
= xprt_iter_get_next(&xpi
);
826 ret
= fn(clnt
, xprt
, data
);
831 xprt_iter_destroy(&xpi
);
834 EXPORT_SYMBOL_GPL(rpc_clnt_iterate_for_each_xprt
);
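/*
 * Illustrative sketch (not part of the original source): a callback with the
 * signature expected by rpc_clnt_iterate_for_each_xprt(). The function name
 * is hypothetical; per the kernel-doc above, returning an error stops the
 * walk, so a callback that only gathers information returns 0.
 */
static inline int example_count_xprt(struct rpc_clnt *clnt,
				     struct rpc_xprt *xprt, void *data)
{
	unsigned int *count = data;

	(*count)++;
	return 0;	/* keep iterating over the remaining transports */
}
/* Usage: rpc_clnt_iterate_for_each_xprt(clnt, example_count_xprt, &count); */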
837 * Kill all tasks for the given client.
838 * XXX: kill their descendants as well?
840 void rpc_killall_tasks(struct rpc_clnt
*clnt
)
842 struct rpc_task
*rovr
;
845 if (list_empty(&clnt
->cl_tasks
))
847 dprintk("RPC: killing all tasks for client %p\n", clnt
);
849 * Spin lock all_tasks to prevent changes...
851 spin_lock(&clnt
->cl_lock
);
852 list_for_each_entry(rovr
, &clnt
->cl_tasks
, tk_task
)
853 rpc_signal_task(rovr
);
854 spin_unlock(&clnt
->cl_lock
);
856 EXPORT_SYMBOL_GPL(rpc_killall_tasks
);
859 * Properly shut down an RPC client, terminating all outstanding
862 void rpc_shutdown_client(struct rpc_clnt
*clnt
)
866 dprintk_rcu("RPC: shutting down %s client for %s\n",
867 clnt
->cl_program
->name
,
868 rcu_dereference(clnt
->cl_xprt
)->servername
);
870 while (!list_empty(&clnt
->cl_tasks
)) {
871 rpc_killall_tasks(clnt
);
872 wait_event_timeout(destroy_wait
,
873 list_empty(&clnt
->cl_tasks
), 1*HZ
);
876 rpc_release_client(clnt
);
878 EXPORT_SYMBOL_GPL(rpc_shutdown_client
);
883 static struct rpc_clnt
*
884 rpc_free_client(struct rpc_clnt
*clnt
)
886 struct rpc_clnt
*parent
= NULL
;
888 dprintk_rcu("RPC: destroying %s client for %s\n",
889 clnt
->cl_program
->name
,
890 rcu_dereference(clnt
->cl_xprt
)->servername
);
891 if (clnt
->cl_parent
!= clnt
)
892 parent
= clnt
->cl_parent
;
893 rpc_clnt_debugfs_unregister(clnt
);
894 rpc_clnt_remove_pipedir(clnt
);
895 rpc_unregister_client(clnt
);
896 rpc_free_iostats(clnt
->cl_metrics
);
897 clnt
->cl_metrics
= NULL
;
898 xprt_put(rcu_dereference_raw(clnt
->cl_xprt
));
899 xprt_iter_destroy(&clnt
->cl_xpi
);
901 put_cred(clnt
->cl_cred
);
910 static struct rpc_clnt
*
911 rpc_free_auth(struct rpc_clnt
*clnt
)
913 if (clnt
->cl_auth
== NULL
)
914 return rpc_free_client(clnt
);
917 * Note: RPCSEC_GSS may need to send NULL RPC calls in order to
918 * release remaining GSS contexts. This mechanism ensures
919 * that it can do so safely.
921 atomic_inc(&clnt
->cl_count
);
922 rpcauth_release(clnt
->cl_auth
);
923 clnt
->cl_auth
= NULL
;
924 if (atomic_dec_and_test(&clnt
->cl_count
))
925 return rpc_free_client(clnt
);
930 * Release reference to the RPC client
933 rpc_release_client(struct rpc_clnt
*clnt
)
935 dprintk("RPC: rpc_release_client(%p)\n", clnt
);
938 if (list_empty(&clnt
->cl_tasks
))
939 wake_up(&destroy_wait
);
940 if (!atomic_dec_and_test(&clnt
->cl_count
))
942 clnt
= rpc_free_auth(clnt
);
943 } while (clnt
!= NULL
);
945 EXPORT_SYMBOL_GPL(rpc_release_client
);
/**
 * rpc_bind_new_program - bind a new RPC program to an existing client
 * @old: old rpc_client
 * @program: rpc program to set
 * @vers: rpc program version
 *
 * Clones the rpc client and sets up a new RPC program.  This is mainly
 * of use for enabling different RPC programs to share the same transport.
 * The Sun NFSv2/v3 ACL protocol can do this.
 */
957 struct rpc_clnt
*rpc_bind_new_program(struct rpc_clnt
*old
,
958 const struct rpc_program
*program
,
961 struct rpc_create_args args
= {
963 .prognumber
= program
->number
,
965 .authflavor
= old
->cl_auth
->au_flavor
,
966 .cred
= old
->cl_cred
,
968 struct rpc_clnt
*clnt
;
971 clnt
= __rpc_clone_client(&args
, old
);
974 err
= rpc_ping(clnt
);
976 rpc_shutdown_client(clnt
);
982 EXPORT_SYMBOL_GPL(rpc_bind_new_program
);
985 rpc_task_get_xprt(struct rpc_clnt
*clnt
, struct rpc_xprt
*xprt
)
987 struct rpc_xprt_switch
*xps
;
992 xps
= rcu_dereference(clnt
->cl_xpi
.xpi_xpswitch
);
993 atomic_long_inc(&xps
->xps_queuelen
);
995 atomic_long_inc(&xprt
->queuelen
);
1001 rpc_task_release_xprt(struct rpc_clnt
*clnt
, struct rpc_xprt
*xprt
)
1003 struct rpc_xprt_switch
*xps
;
1005 atomic_long_dec(&xprt
->queuelen
);
1007 xps
= rcu_dereference(clnt
->cl_xpi
.xpi_xpswitch
);
1008 atomic_long_dec(&xps
->xps_queuelen
);
1014 void rpc_task_release_transport(struct rpc_task
*task
)
1016 struct rpc_xprt
*xprt
= task
->tk_xprt
;
1019 task
->tk_xprt
= NULL
;
1020 if (task
->tk_client
)
1021 rpc_task_release_xprt(task
->tk_client
, xprt
);
1026 EXPORT_SYMBOL_GPL(rpc_task_release_transport
);
1028 void rpc_task_release_client(struct rpc_task
*task
)
1030 struct rpc_clnt
*clnt
= task
->tk_client
;
1032 rpc_task_release_transport(task
);
1034 /* Remove from client task list */
1035 spin_lock(&clnt
->cl_lock
);
1036 list_del(&task
->tk_task
);
1037 spin_unlock(&clnt
->cl_lock
);
1038 task
->tk_client
= NULL
;
1040 rpc_release_client(clnt
);
1044 static struct rpc_xprt
*
1045 rpc_task_get_first_xprt(struct rpc_clnt
*clnt
)
1047 struct rpc_xprt
*xprt
;
1050 xprt
= xprt_get(rcu_dereference(clnt
->cl_xprt
));
1052 return rpc_task_get_xprt(clnt
, xprt
);
1055 static struct rpc_xprt
*
1056 rpc_task_get_next_xprt(struct rpc_clnt
*clnt
)
1058 return rpc_task_get_xprt(clnt
, xprt_iter_get_next(&clnt
->cl_xpi
));
1062 void rpc_task_set_transport(struct rpc_task
*task
, struct rpc_clnt
*clnt
)
1066 if (task
->tk_flags
& RPC_TASK_NO_ROUND_ROBIN
)
1067 task
->tk_xprt
= rpc_task_get_first_xprt(clnt
);
1069 task
->tk_xprt
= rpc_task_get_next_xprt(clnt
);
1073 void rpc_task_set_client(struct rpc_task
*task
, struct rpc_clnt
*clnt
)
1077 rpc_task_set_transport(task
, clnt
);
1078 task
->tk_client
= clnt
;
1079 atomic_inc(&clnt
->cl_count
);
1080 if (clnt
->cl_softrtry
)
1081 task
->tk_flags
|= RPC_TASK_SOFT
;
1082 if (clnt
->cl_softerr
)
1083 task
->tk_flags
|= RPC_TASK_TIMEOUT
;
1084 if (clnt
->cl_noretranstimeo
)
1085 task
->tk_flags
|= RPC_TASK_NO_RETRANS_TIMEOUT
;
1086 if (atomic_read(&clnt
->cl_swapper
))
1087 task
->tk_flags
|= RPC_TASK_SWAPPER
;
1088 /* Add to the client's list of all tasks */
1089 spin_lock(&clnt
->cl_lock
);
1090 list_add_tail(&task
->tk_task
, &clnt
->cl_tasks
);
1091 spin_unlock(&clnt
->cl_lock
);
1096 rpc_task_set_rpc_message(struct rpc_task
*task
, const struct rpc_message
*msg
)
1099 task
->tk_msg
.rpc_proc
= msg
->rpc_proc
;
1100 task
->tk_msg
.rpc_argp
= msg
->rpc_argp
;
1101 task
->tk_msg
.rpc_resp
= msg
->rpc_resp
;
1102 if (msg
->rpc_cred
!= NULL
)
1103 task
->tk_msg
.rpc_cred
= get_cred(msg
->rpc_cred
);
1108 * Default callback for async RPC calls
1111 rpc_default_callback(struct rpc_task
*task
, void *data
)
1115 static const struct rpc_call_ops rpc_default_ops
= {
1116 .rpc_call_done
= rpc_default_callback
,
/**
 * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it
 * @task_setup_data: pointer to task initialisation data
 */
1123 struct rpc_task
*rpc_run_task(const struct rpc_task_setup
*task_setup_data
)
1125 struct rpc_task
*task
;
1127 task
= rpc_new_task(task_setup_data
);
1129 rpc_task_set_client(task
, task_setup_data
->rpc_client
);
1130 rpc_task_set_rpc_message(task
, task_setup_data
->rpc_message
);
1132 if (task
->tk_action
== NULL
)
1133 rpc_call_start(task
);
1135 atomic_inc(&task
->tk_count
);
1139 EXPORT_SYMBOL_GPL(rpc_run_task
);
/**
 * rpc_call_sync - Perform a synchronous RPC call
 * @clnt: pointer to RPC client
 * @msg: RPC call parameters
 * @flags: RPC call flags
 */
1147 int rpc_call_sync(struct rpc_clnt
*clnt
, const struct rpc_message
*msg
, int flags
)
1149 struct rpc_task
*task
;
1150 struct rpc_task_setup task_setup_data
= {
1153 .callback_ops
= &rpc_default_ops
,
1158 WARN_ON_ONCE(flags
& RPC_TASK_ASYNC
);
1159 if (flags
& RPC_TASK_ASYNC
) {
1160 rpc_release_calldata(task_setup_data
.callback_ops
,
1161 task_setup_data
.callback_data
);
1165 task
= rpc_run_task(&task_setup_data
);
1167 return PTR_ERR(task
);
1168 status
= task
->tk_status
;
1172 EXPORT_SYMBOL_GPL(rpc_call_sync
);
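/*
 * Illustrative sketch (not part of the original source): issuing a
 * synchronous call through rpc_call_sync(). "example_nullproc" stands in for
 * a real entry from a program's rpc_procinfo table; only the rpc_message
 * fields already used elsewhere in this file are assumed.
 */
static inline int example_sync_call(struct rpc_clnt *clnt,
				    const struct rpc_procinfo *example_nullproc)
{
	struct rpc_message msg = {
		.rpc_proc	= example_nullproc,
		.rpc_argp	= NULL,
		.rpc_resp	= NULL,
	};

	/* Blocks until the reply is decoded or a fatal error occurs. */
	return rpc_call_sync(clnt, &msg, RPC_TASK_SOFT);
}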
/**
 * rpc_call_async - Perform an asynchronous RPC call
 * @clnt: pointer to RPC client
 * @msg: RPC call parameters
 * @flags: RPC call flags
 * @tk_ops: RPC call ops
 * @data: user call data
 */
1183 rpc_call_async(struct rpc_clnt
*clnt
, const struct rpc_message
*msg
, int flags
,
1184 const struct rpc_call_ops
*tk_ops
, void *data
)
1186 struct rpc_task
*task
;
1187 struct rpc_task_setup task_setup_data
= {
1190 .callback_ops
= tk_ops
,
1191 .callback_data
= data
,
1192 .flags
= flags
|RPC_TASK_ASYNC
,
1195 task
= rpc_run_task(&task_setup_data
);
1197 return PTR_ERR(task
);
1201 EXPORT_SYMBOL_GPL(rpc_call_async
);
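/*
 * Illustrative sketch (not part of the original source): an asynchronous
 * call with a completion callback. The ops table and function names are
 * hypothetical; .rpc_call_done is the same completion hook used by this
 * file's own rpc_default_ops.
 */
static void example_async_done(struct rpc_task *task, void *calldata)
{
	if (task->tk_status != 0)
		pr_info("example async call failed: %d\n", task->tk_status);
}

static const struct rpc_call_ops example_async_ops = {
	.rpc_call_done	= example_async_done,
};

static inline int example_async_call(struct rpc_clnt *clnt,
				     const struct rpc_message *msg)
{
	/* Returns once the task is queued; completion runs from rpciod. */
	return rpc_call_async(clnt, msg, RPC_TASK_SOFT,
			      &example_async_ops, NULL);
}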
1203 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
1204 static void call_bc_encode(struct rpc_task
*task
);
1207 * rpc_run_bc_task - Allocate a new RPC task for backchannel use, then run
1208 * rpc_execute against it
1211 struct rpc_task
*rpc_run_bc_task(struct rpc_rqst
*req
)
1213 struct rpc_task
*task
;
1214 struct rpc_task_setup task_setup_data
= {
1215 .callback_ops
= &rpc_default_ops
,
1216 .flags
= RPC_TASK_SOFTCONN
|
1217 RPC_TASK_NO_RETRANS_TIMEOUT
,
1220 dprintk("RPC: rpc_run_bc_task req= %p\n", req
);
1222 * Create an rpc_task to send the data
1224 task
= rpc_new_task(&task_setup_data
);
1225 xprt_init_bc_request(req
, task
);
1227 task
->tk_action
= call_bc_encode
;
1228 atomic_inc(&task
->tk_count
);
1229 WARN_ON_ONCE(atomic_read(&task
->tk_count
) != 2);
1232 dprintk("RPC: rpc_run_bc_task: task= %p\n", task
);
1235 #endif /* CONFIG_SUNRPC_BACKCHANNEL */
/**
 * rpc_prepare_reply_pages - Prepare to receive a reply data payload into pages
 * @req: RPC request to prepare
 * @pages: vector of struct page pointers
 * @base: offset in first page where receive should start, in bytes
 * @len: expected size of the upper layer data payload, in bytes
 * @hdrsize: expected size of upper layer reply header, in XDR words
 *
 */
1246 void rpc_prepare_reply_pages(struct rpc_rqst
*req
, struct page
**pages
,
1247 unsigned int base
, unsigned int len
,
1248 unsigned int hdrsize
)
1250 /* Subtract one to force an extra word of buffer space for the
1251 * payload's XDR pad to fall into the rcv_buf's tail iovec.
1253 hdrsize
+= RPC_REPHDRSIZE
+ req
->rq_cred
->cr_auth
->au_ralign
- 1;
1255 xdr_inline_pages(&req
->rq_rcv_buf
, hdrsize
<< 2, pages
, base
, len
);
1256 trace_rpc_reply_pages(req
);
1258 EXPORT_SYMBOL_GPL(rpc_prepare_reply_pages
);
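/*
 * Illustrative sketch (not part of the original source): how an upper layer
 * might hand its page vector over before transmitting a request. Note that
 * the reply header size is expressed in XDR words (4-byte units), per the
 * kernel-doc above; all identifiers below are hypothetical.
 */
static inline void example_setup_reply_pages(struct rpc_rqst *req,
					     struct page **pages,
					     unsigned int count)
{
	/* e.g. a reply header of 4 XDR words (16 bytes) precedes the data */
	const unsigned int example_hdrsize = 4;

	rpc_prepare_reply_pages(req, pages, 0, count, example_hdrsize);
}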
1261 rpc_call_start(struct rpc_task
*task
)
1263 task
->tk_action
= call_start
;
1265 EXPORT_SYMBOL_GPL(rpc_call_start
);
/**
 * rpc_peeraddr - extract remote peer address from clnt's xprt
 * @clnt: RPC client structure
 * @buf: target buffer
 * @bufsize: length of target buffer
 *
 * Returns the number of bytes that are actually in the stored address.
 */
1275 size_t rpc_peeraddr(struct rpc_clnt
*clnt
, struct sockaddr
*buf
, size_t bufsize
)
1278 struct rpc_xprt
*xprt
;
1281 xprt
= rcu_dereference(clnt
->cl_xprt
);
1283 bytes
= xprt
->addrlen
;
1284 if (bytes
> bufsize
)
1286 memcpy(buf
, &xprt
->addr
, bytes
);
1291 EXPORT_SYMBOL_GPL(rpc_peeraddr
);
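/*
 * Illustrative sketch (not part of the original source): copying the peer's
 * address out of a client. The helper name is hypothetical; a
 * sockaddr_storage is large enough for any address family the transport
 * may hold.
 */
static inline void example_log_peeraddr(struct rpc_clnt *clnt)
{
	struct sockaddr_storage peer;
	size_t len;

	len = rpc_peeraddr(clnt, (struct sockaddr *)&peer, sizeof(peer));
	if (len != 0)
		pr_info("peer address copied: %zu bytes (family %u)\n",
			len, (unsigned int)peer.ss_family);
}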
/**
 * rpc_peeraddr2str - return remote peer address in printable format
 * @clnt: RPC client structure
 * @format: address format
 *
 * NB: the lifetime of the memory referenced by the returned pointer is
 * the same as the rpc_xprt itself.  As long as the caller uses this
 * pointer, it must hold the RCU read lock.
 */
1302 const char *rpc_peeraddr2str(struct rpc_clnt
*clnt
,
1303 enum rpc_display_format_t format
)
1305 struct rpc_xprt
*xprt
;
1307 xprt
= rcu_dereference(clnt
->cl_xprt
);
1309 if (xprt
->address_strings
[format
] != NULL
)
1310 return xprt
->address_strings
[format
];
1312 return "unprintable";
1314 EXPORT_SYMBOL_GPL(rpc_peeraddr2str
);
1316 static const struct sockaddr_in rpc_inaddr_loopback
= {
1317 .sin_family
= AF_INET
,
1318 .sin_addr
.s_addr
= htonl(INADDR_ANY
),
1321 static const struct sockaddr_in6 rpc_in6addr_loopback
= {
1322 .sin6_family
= AF_INET6
,
1323 .sin6_addr
= IN6ADDR_ANY_INIT
,
1327 * Try a getsockname() on a connected datagram socket. Using a
1328 * connected datagram socket prevents leaving a socket in TIME_WAIT.
1329 * This conserves the ephemeral port number space.
1331 * Returns zero and fills in "buf" if successful; otherwise, a
1332 * negative errno is returned.
1334 static int rpc_sockname(struct net
*net
, struct sockaddr
*sap
, size_t salen
,
1335 struct sockaddr
*buf
)
1337 struct socket
*sock
;
1340 err
= __sock_create(net
, sap
->sa_family
,
1341 SOCK_DGRAM
, IPPROTO_UDP
, &sock
, 1);
1343 dprintk("RPC: can't create UDP socket (%d)\n", err
);
1347 switch (sap
->sa_family
) {
1349 err
= kernel_bind(sock
,
1350 (struct sockaddr
*)&rpc_inaddr_loopback
,
1351 sizeof(rpc_inaddr_loopback
));
1354 err
= kernel_bind(sock
,
1355 (struct sockaddr
*)&rpc_in6addr_loopback
,
1356 sizeof(rpc_in6addr_loopback
));
1359 err
= -EAFNOSUPPORT
;
1363 dprintk("RPC: can't bind UDP socket (%d)\n", err
);
1367 err
= kernel_connect(sock
, sap
, salen
, 0);
1369 dprintk("RPC: can't connect UDP socket (%d)\n", err
);
1373 err
= kernel_getsockname(sock
, buf
);
1375 dprintk("RPC: getsockname failed (%d)\n", err
);
1380 if (buf
->sa_family
== AF_INET6
) {
1381 struct sockaddr_in6
*sin6
= (struct sockaddr_in6
*)buf
;
1382 sin6
->sin6_scope_id
= 0;
1384 dprintk("RPC: %s succeeded\n", __func__
);
1393 * Scraping a connected socket failed, so we don't have a useable
1394 * local address. Fallback: generate an address that will prevent
1395 * the server from calling us back.
1397 * Returns zero and fills in "buf" if successful; otherwise, a
1398 * negative errno is returned.
1400 static int rpc_anyaddr(int family
, struct sockaddr
*buf
, size_t buflen
)
1404 if (buflen
< sizeof(rpc_inaddr_loopback
))
1406 memcpy(buf
, &rpc_inaddr_loopback
,
1407 sizeof(rpc_inaddr_loopback
));
1410 if (buflen
< sizeof(rpc_in6addr_loopback
))
1412 memcpy(buf
, &rpc_in6addr_loopback
,
1413 sizeof(rpc_in6addr_loopback
));
1416 dprintk("RPC: %s: address family not supported\n",
1418 return -EAFNOSUPPORT
;
1420 dprintk("RPC: %s: succeeded\n", __func__
);
/**
 * rpc_localaddr - discover local endpoint address for an RPC client
 * @clnt: RPC client structure
 * @buf: target buffer
 * @buflen: size of target buffer, in bytes
 *
 * Returns zero and fills in "buf" and "buflen" if successful;
 * otherwise, a negative errno is returned.
 *
 * This works even if the underlying transport is not currently connected,
 * or if the upper layer never previously provided a source address.
 *
 * The result of this function call is transient: multiple calls in
 * succession may give different results, depending on how local
 * networking configuration changes over time.
 */
1440 int rpc_localaddr(struct rpc_clnt
*clnt
, struct sockaddr
*buf
, size_t buflen
)
1442 struct sockaddr_storage address
;
1443 struct sockaddr
*sap
= (struct sockaddr
*)&address
;
1444 struct rpc_xprt
*xprt
;
1450 xprt
= rcu_dereference(clnt
->cl_xprt
);
1451 salen
= xprt
->addrlen
;
1452 memcpy(sap
, &xprt
->addr
, salen
);
1453 net
= get_net(xprt
->xprt_net
);
1456 rpc_set_port(sap
, 0);
1457 err
= rpc_sockname(net
, sap
, salen
, buf
);
1460 /* Couldn't discover local address, return ANYADDR */
1461 return rpc_anyaddr(sap
->sa_family
, buf
, buflen
);
1464 EXPORT_SYMBOL_GPL(rpc_localaddr
);
1467 rpc_setbufsize(struct rpc_clnt
*clnt
, unsigned int sndsize
, unsigned int rcvsize
)
1469 struct rpc_xprt
*xprt
;
1472 xprt
= rcu_dereference(clnt
->cl_xprt
);
1473 if (xprt
->ops
->set_buffer_size
)
1474 xprt
->ops
->set_buffer_size(xprt
, sndsize
, rcvsize
);
1477 EXPORT_SYMBOL_GPL(rpc_setbufsize
);
1480 * rpc_net_ns - Get the network namespace for this RPC client
1481 * @clnt: RPC client to query
1484 struct net
*rpc_net_ns(struct rpc_clnt
*clnt
)
1489 ret
= rcu_dereference(clnt
->cl_xprt
)->xprt_net
;
1493 EXPORT_SYMBOL_GPL(rpc_net_ns
);
1496 * rpc_max_payload - Get maximum payload size for a transport, in bytes
1497 * @clnt: RPC client to query
1499 * For stream transports, this is one RPC record fragment (see RFC
1500 * 1831), as we don't support multi-record requests yet. For datagram
1501 * transports, this is the size of an IP packet minus the IP, UDP, and
1504 size_t rpc_max_payload(struct rpc_clnt
*clnt
)
1509 ret
= rcu_dereference(clnt
->cl_xprt
)->max_payload
;
1513 EXPORT_SYMBOL_GPL(rpc_max_payload
);
1516 * rpc_max_bc_payload - Get maximum backchannel payload size, in bytes
1517 * @clnt: RPC client to query
1519 size_t rpc_max_bc_payload(struct rpc_clnt
*clnt
)
1521 struct rpc_xprt
*xprt
;
1525 xprt
= rcu_dereference(clnt
->cl_xprt
);
1526 ret
= xprt
->ops
->bc_maxpayload(xprt
);
1530 EXPORT_SYMBOL_GPL(rpc_max_bc_payload
);
1532 unsigned int rpc_num_bc_slots(struct rpc_clnt
*clnt
)
1534 struct rpc_xprt
*xprt
;
1538 xprt
= rcu_dereference(clnt
->cl_xprt
);
1539 ret
= xprt
->ops
->bc_num_slots(xprt
);
1543 EXPORT_SYMBOL_GPL(rpc_num_bc_slots
);
1546 * rpc_force_rebind - force transport to check that remote port is unchanged
1547 * @clnt: client to rebind
1550 void rpc_force_rebind(struct rpc_clnt
*clnt
)
1552 if (clnt
->cl_autobind
) {
1554 xprt_clear_bound(rcu_dereference(clnt
->cl_xprt
));
1558 EXPORT_SYMBOL_GPL(rpc_force_rebind
);
1561 __rpc_restart_call(struct rpc_task
*task
, void (*action
)(struct rpc_task
*))
1563 task
->tk_status
= 0;
1564 task
->tk_rpc_status
= 0;
1565 task
->tk_action
= action
;
1570 * Restart an (async) RPC call. Usually called from within the
1574 rpc_restart_call(struct rpc_task
*task
)
1576 return __rpc_restart_call(task
, call_start
);
1578 EXPORT_SYMBOL_GPL(rpc_restart_call
);
1581 * Restart an (async) RPC call from the call_prepare state.
1582 * Usually called from within the exit handler.
1585 rpc_restart_call_prepare(struct rpc_task
*task
)
1587 if (task
->tk_ops
->rpc_call_prepare
!= NULL
)
1588 return __rpc_restart_call(task
, rpc_prepare_task
);
1589 return rpc_restart_call(task
);
1591 EXPORT_SYMBOL_GPL(rpc_restart_call_prepare
);
1594 *rpc_proc_name(const struct rpc_task
*task
)
1596 const struct rpc_procinfo
*proc
= task
->tk_msg
.rpc_proc
;
1600 return proc
->p_name
;
1608 __rpc_call_rpcerror(struct rpc_task
*task
, int tk_status
, int rpc_status
)
1610 task
->tk_rpc_status
= rpc_status
;
1611 rpc_exit(task
, tk_status
);
1615 rpc_call_rpcerror(struct rpc_task
*task
, int status
)
1617 __rpc_call_rpcerror(task
, status
, status
);
1623 * Other FSM states can be visited zero or more times, but
1624 * this state is visited exactly once for each RPC.
1627 call_start(struct rpc_task
*task
)
1629 struct rpc_clnt
*clnt
= task
->tk_client
;
1630 int idx
= task
->tk_msg
.rpc_proc
->p_statidx
;
1632 trace_rpc_request(task
);
1633 dprintk("RPC: %5u call_start %s%d proc %s (%s)\n", task
->tk_pid
,
1634 clnt
->cl_program
->name
, clnt
->cl_vers
,
1635 rpc_proc_name(task
),
1636 (RPC_IS_ASYNC(task
) ? "async" : "sync"));
1638 /* Increment call count (version might not be valid for ping) */
1639 if (clnt
->cl_program
->version
[clnt
->cl_vers
])
1640 clnt
->cl_program
->version
[clnt
->cl_vers
]->counts
[idx
]++;
1641 clnt
->cl_stats
->rpccnt
++;
1642 task
->tk_action
= call_reserve
;
1643 rpc_task_set_transport(task
, clnt
);
1647 * 1. Reserve an RPC call slot
1650 call_reserve(struct rpc_task
*task
)
1652 dprint_status(task
);
1654 task
->tk_status
= 0;
1655 task
->tk_action
= call_reserveresult
;
1659 static void call_retry_reserve(struct rpc_task
*task
);
1662 * 1b. Grok the result of xprt_reserve()
1665 call_reserveresult(struct rpc_task
*task
)
1667 int status
= task
->tk_status
;
1669 dprint_status(task
);
1672 * After a call to xprt_reserve(), we must have either
1673 * a request slot or else an error status.
1675 task
->tk_status
= 0;
1677 if (task
->tk_rqstp
) {
1678 task
->tk_action
= call_refresh
;
1682 rpc_call_rpcerror(task
, -EIO
);
1687 * Even though there was an error, we may have acquired
1688 * a request slot somehow. Make sure not to leak it.
1695 rpc_delay(task
, HZ
>> 2);
1697 case -EAGAIN
: /* woken up; retry */
1698 task
->tk_action
= call_retry_reserve
;
1701 rpc_call_rpcerror(task
, status
);
1706 * 1c. Retry reserving an RPC call slot
1709 call_retry_reserve(struct rpc_task
*task
)
1711 dprint_status(task
);
1713 task
->tk_status
= 0;
1714 task
->tk_action
= call_reserveresult
;
1715 xprt_retry_reserve(task
);
1719 * 2. Bind and/or refresh the credentials
1722 call_refresh(struct rpc_task
*task
)
1724 dprint_status(task
);
1726 task
->tk_action
= call_refreshresult
;
1727 task
->tk_status
= 0;
1728 task
->tk_client
->cl_stats
->rpcauthrefresh
++;
1729 rpcauth_refreshcred(task
);
1733 * 2a. Process the results of a credential refresh
1736 call_refreshresult(struct rpc_task
*task
)
1738 int status
= task
->tk_status
;
1740 dprint_status(task
);
1742 task
->tk_status
= 0;
1743 task
->tk_action
= call_refresh
;
1746 if (rpcauth_uptodatecred(task
)) {
1747 task
->tk_action
= call_allocate
;
1750 /* Use rate-limiting and a max number of retries if refresh
1751 * had status 0 but failed to update the cred.
1755 rpc_delay(task
, 3*HZ
);
1761 if (!task
->tk_cred_retry
)
1763 task
->tk_cred_retry
--;
1764 dprintk("RPC: %5u %s: retry refresh creds\n",
1765 task
->tk_pid
, __func__
);
1768 dprintk("RPC: %5u %s: refresh creds failed with error %d\n",
1769 task
->tk_pid
, __func__
, status
);
1770 rpc_call_rpcerror(task
, status
);
1774 * 2b. Allocate the buffer. For details, see sched.c:rpc_malloc.
1775 * (Note: buffer memory is freed in xprt_release).
1778 call_allocate(struct rpc_task
*task
)
1780 const struct rpc_auth
*auth
= task
->tk_rqstp
->rq_cred
->cr_auth
;
1781 struct rpc_rqst
*req
= task
->tk_rqstp
;
1782 struct rpc_xprt
*xprt
= req
->rq_xprt
;
1783 const struct rpc_procinfo
*proc
= task
->tk_msg
.rpc_proc
;
1786 dprint_status(task
);
1788 task
->tk_status
= 0;
1789 task
->tk_action
= call_encode
;
1794 if (proc
->p_proc
!= 0) {
1795 BUG_ON(proc
->p_arglen
== 0);
1796 if (proc
->p_decode
!= NULL
)
1797 BUG_ON(proc
->p_replen
== 0);
1801 * Calculate the size (in quads) of the RPC call
1802 * and reply headers, and convert both values
1805 req
->rq_callsize
= RPC_CALLHDRSIZE
+ (auth
->au_cslack
<< 1) +
1807 req
->rq_callsize
<<= 2;
1809 * Note: the reply buffer must at minimum allocate enough space
1810 * for the 'struct accepted_reply' from RFC5531.
1812 req
->rq_rcvsize
= RPC_REPHDRSIZE
+ auth
->au_rslack
+ \
1813 max_t(size_t, proc
->p_replen
, 2);
1814 req
->rq_rcvsize
<<= 2;
1816 status
= xprt
->ops
->buf_alloc(task
);
1817 xprt_inject_disconnect(xprt
);
1820 if (status
!= -ENOMEM
) {
1821 rpc_call_rpcerror(task
, status
);
1825 dprintk("RPC: %5u rpc_buffer allocation failed\n", task
->tk_pid
);
1827 if (RPC_IS_ASYNC(task
) || !fatal_signal_pending(current
)) {
1828 task
->tk_action
= call_allocate
;
1829 rpc_delay(task
, HZ
>>4);
1833 rpc_call_rpcerror(task
, -ERESTARTSYS
);
1837 rpc_task_need_encode(struct rpc_task
*task
)
1839 return test_bit(RPC_TASK_NEED_XMIT
, &task
->tk_runstate
) == 0 &&
1840 (!(task
->tk_flags
& RPC_TASK_SENT
) ||
1841 !(task
->tk_flags
& RPC_TASK_NO_RETRANS_TIMEOUT
) ||
1842 xprt_request_need_retransmit(task
));
1846 rpc_xdr_encode(struct rpc_task
*task
)
1848 struct rpc_rqst
*req
= task
->tk_rqstp
;
1849 struct xdr_stream xdr
;
1851 xdr_buf_init(&req
->rq_snd_buf
,
1854 xdr_buf_init(&req
->rq_rcv_buf
,
1858 req
->rq_reply_bytes_recvd
= 0;
1859 req
->rq_snd_buf
.head
[0].iov_len
= 0;
1860 xdr_init_encode(&xdr
, &req
->rq_snd_buf
,
1861 req
->rq_snd_buf
.head
[0].iov_base
, req
);
1862 xdr_free_bvec(&req
->rq_snd_buf
);
1863 if (rpc_encode_header(task
, &xdr
))
1866 task
->tk_status
= rpcauth_wrap_req(task
, &xdr
);
1870 * 3. Encode arguments of an RPC call
1873 call_encode(struct rpc_task
*task
)
1875 if (!rpc_task_need_encode(task
))
1877 dprint_status(task
);
1878 /* Dequeue task from the receive queue while we're encoding */
1879 xprt_request_dequeue_xprt(task
);
1880 /* Encode here so that rpcsec_gss can use correct sequence number. */
1881 rpc_xdr_encode(task
);
1882 /* Did the encode result in an error condition? */
1883 if (task
->tk_status
!= 0) {
1884 /* Was the error nonfatal? */
1885 switch (task
->tk_status
) {
1888 rpc_delay(task
, HZ
>> 4);
1891 if (!task
->tk_cred_retry
) {
1892 rpc_exit(task
, task
->tk_status
);
1894 task
->tk_action
= call_refresh
;
1895 task
->tk_cred_retry
--;
1896 dprintk("RPC: %5u %s: retry refresh creds\n",
1897 task
->tk_pid
, __func__
);
1901 rpc_call_rpcerror(task
, task
->tk_status
);
1906 /* Add task to reply queue before transmission to avoid races */
1907 if (rpc_reply_expected(task
))
1908 xprt_request_enqueue_receive(task
);
1909 xprt_request_enqueue_transmit(task
);
1911 task
->tk_action
= call_transmit
;
1912 /* Check that the connection is OK */
1913 if (!xprt_bound(task
->tk_xprt
))
1914 task
->tk_action
= call_bind
;
1915 else if (!xprt_connected(task
->tk_xprt
))
1916 task
->tk_action
= call_connect
;
1920 * Helpers to check if the task was already transmitted, and
1921 * to take action when that is the case.
1924 rpc_task_transmitted(struct rpc_task
*task
)
1926 return !test_bit(RPC_TASK_NEED_XMIT
, &task
->tk_runstate
);
1930 rpc_task_handle_transmitted(struct rpc_task
*task
)
1932 xprt_end_transmit(task
);
1933 task
->tk_action
= call_transmit_status
;
1937 * 4. Get the server port number if not yet set
1940 call_bind(struct rpc_task
*task
)
1942 struct rpc_xprt
*xprt
= task
->tk_rqstp
->rq_xprt
;
1944 if (rpc_task_transmitted(task
)) {
1945 rpc_task_handle_transmitted(task
);
1949 if (xprt_bound(xprt
)) {
1950 task
->tk_action
= call_connect
;
1954 dprint_status(task
);
1956 task
->tk_action
= call_bind_status
;
1957 if (!xprt_prepare_transmit(task
))
1960 xprt
->ops
->rpcbind(task
);
1964 * 4a. Sort out bind result
1967 call_bind_status(struct rpc_task
*task
)
1969 struct rpc_xprt
*xprt
= task
->tk_rqstp
->rq_xprt
;
1972 if (rpc_task_transmitted(task
)) {
1973 rpc_task_handle_transmitted(task
);
1977 dprint_status(task
);
1978 trace_rpc_bind_status(task
);
1979 if (task
->tk_status
>= 0)
1981 if (xprt_bound(xprt
)) {
1982 task
->tk_status
= 0;
1986 switch (task
->tk_status
) {
1988 dprintk("RPC: %5u rpcbind out of memory\n", task
->tk_pid
);
1989 rpc_delay(task
, HZ
>> 2);
1992 dprintk("RPC: %5u remote rpcbind: RPC program/version "
1993 "unavailable\n", task
->tk_pid
);
1994 /* fail immediately if this is an RPC ping */
1995 if (task
->tk_msg
.rpc_proc
->p_proc
== 0) {
1996 status
= -EOPNOTSUPP
;
1999 if (task
->tk_rebind_retry
== 0)
2001 task
->tk_rebind_retry
--;
2002 rpc_delay(task
, 3*HZ
);
2005 rpc_delay(task
, HZ
>> 2);
2010 dprintk("RPC: %5u rpcbind request timed out\n",
2014 /* server doesn't support any rpcbind version we know of */
2015 dprintk("RPC: %5u unrecognized remote rpcbind service\n",
2018 case -EPROTONOSUPPORT
:
2019 dprintk("RPC: %5u remote rpcbind version unavailable, retrying\n",
2022 case -ECONNREFUSED
: /* connection problems */
2031 dprintk("RPC: %5u remote rpcbind unreachable: %d\n",
2032 task
->tk_pid
, task
->tk_status
);
2033 if (!RPC_IS_SOFTCONN(task
)) {
2034 rpc_delay(task
, 5*HZ
);
2037 status
= task
->tk_status
;
2040 dprintk("RPC: %5u unrecognized rpcbind error (%d)\n",
2041 task
->tk_pid
, -task
->tk_status
);
2044 rpc_call_rpcerror(task
, status
);
2047 task
->tk_action
= call_connect
;
2050 task
->tk_status
= 0;
2051 task
->tk_action
= call_bind
;
2052 rpc_check_timeout(task
);
2056 * 4b. Connect to the RPC server
2059 call_connect(struct rpc_task
*task
)
2061 struct rpc_xprt
*xprt
= task
->tk_rqstp
->rq_xprt
;
2063 if (rpc_task_transmitted(task
)) {
2064 rpc_task_handle_transmitted(task
);
2068 if (xprt_connected(xprt
)) {
2069 task
->tk_action
= call_transmit
;
2073 dprintk("RPC: %5u call_connect xprt %p %s connected\n",
2075 (xprt_connected(xprt
) ? "is" : "is not"));
2077 task
->tk_action
= call_connect_status
;
2078 if (task
->tk_status
< 0)
2080 if (task
->tk_flags
& RPC_TASK_NOCONNECT
) {
2081 rpc_call_rpcerror(task
, -ENOTCONN
);
2084 if (!xprt_prepare_transmit(task
))
2090 * 4c. Sort out connect result
2093 call_connect_status(struct rpc_task
*task
)
2095 struct rpc_xprt
*xprt
= task
->tk_rqstp
->rq_xprt
;
2096 struct rpc_clnt
*clnt
= task
->tk_client
;
2097 int status
= task
->tk_status
;
2099 if (rpc_task_transmitted(task
)) {
2100 rpc_task_handle_transmitted(task
);
2104 dprint_status(task
);
2105 trace_rpc_connect_status(task
);
2107 if (task
->tk_status
== 0) {
2108 clnt
->cl_stats
->netreconn
++;
2111 if (xprt_connected(xprt
)) {
2112 task
->tk_status
= 0;
2116 task
->tk_status
= 0;
2119 /* A positive refusal suggests a rebind is needed. */
2120 if (RPC_IS_SOFTCONN(task
))
2122 if (clnt
->cl_autobind
) {
2123 rpc_force_rebind(clnt
);
2133 xprt_conditional_disconnect(task
->tk_rqstp
->rq_xprt
,
2134 task
->tk_rqstp
->rq_connect_cookie
);
2135 if (RPC_IS_SOFTCONN(task
))
2137 /* retry with existing socket, after a delay */
2138 rpc_delay(task
, 3*HZ
);
2146 rpc_delay(task
, HZ
>> 2);
2149 rpc_call_rpcerror(task
, status
);
2152 task
->tk_action
= call_transmit
;
2155 /* Check for timeouts before looping back to call_bind */
2156 task
->tk_action
= call_bind
;
2157 rpc_check_timeout(task
);
2161 * 5. Transmit the RPC request, and wait for reply
2164 call_transmit(struct rpc_task
*task
)
2166 if (rpc_task_transmitted(task
)) {
2167 rpc_task_handle_transmitted(task
);
2171 dprint_status(task
);
2173 task
->tk_action
= call_transmit_status
;
2174 if (!xprt_prepare_transmit(task
))
2176 task
->tk_status
= 0;
2177 if (test_bit(RPC_TASK_NEED_XMIT
, &task
->tk_runstate
)) {
2178 if (!xprt_connected(task
->tk_xprt
)) {
2179 task
->tk_status
= -ENOTCONN
;
2182 xprt_transmit(task
);
2184 xprt_end_transmit(task
);
2188 * 5a. Handle cleanup after a transmission
2191 call_transmit_status(struct rpc_task
*task
)
2193 task
->tk_action
= call_status
;
2196 * Common case: success. Force the compiler to put this
2199 if (rpc_task_transmitted(task
)) {
2200 task
->tk_status
= 0;
2201 xprt_request_wait_receive(task
);
2205 switch (task
->tk_status
) {
2207 dprint_status(task
);
2210 task
->tk_status
= 0;
2211 task
->tk_action
= call_encode
;
2214 * Special cases: if we've been waiting on the
2215 * socket's write_space() callback, or if the
2216 * socket just returned a connection error,
2217 * then hold onto the transport lock.
2220 rpc_delay(task
, HZ
>>2);
2224 task
->tk_action
= call_transmit
;
2225 task
->tk_status
= 0;
2233 if (RPC_IS_SOFTCONN(task
)) {
2234 if (!task
->tk_msg
.rpc_proc
->p_proc
)
2235 trace_xprt_ping(task
->tk_xprt
,
2237 rpc_call_rpcerror(task
, task
->tk_status
);
2246 task
->tk_action
= call_bind
;
2247 task
->tk_status
= 0;
2250 rpc_check_timeout(task
);
2253 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
2254 static void call_bc_transmit(struct rpc_task
*task
);
2255 static void call_bc_transmit_status(struct rpc_task
*task
);
2258 call_bc_encode(struct rpc_task
*task
)
2260 xprt_request_enqueue_transmit(task
);
2261 task
->tk_action
= call_bc_transmit
;
2265 * 5b. Send the backchannel RPC reply. On error, drop the reply. In
2266 * addition, disconnect on connectivity errors.
2269 call_bc_transmit(struct rpc_task
*task
)
2271 task
->tk_action
= call_bc_transmit_status
;
2272 if (test_bit(RPC_TASK_NEED_XMIT
, &task
->tk_runstate
)) {
2273 if (!xprt_prepare_transmit(task
))
2275 task
->tk_status
= 0;
2276 xprt_transmit(task
);
2278 xprt_end_transmit(task
);
2282 call_bc_transmit_status(struct rpc_task
*task
)
2284 struct rpc_rqst
*req
= task
->tk_rqstp
;
2286 if (rpc_task_transmitted(task
))
2287 task
->tk_status
= 0;
2289 dprint_status(task
);
2291 switch (task
->tk_status
) {
2305 rpc_delay(task
, HZ
>>2);
2309 task
->tk_status
= 0;
2310 task
->tk_action
= call_bc_transmit
;
2314 * Problem reaching the server. Disconnect and let the
2315 * forechannel reestablish the connection. The server will
2316 * have to retransmit the backchannel request and we'll
2317 * reprocess it. Since these ops are idempotent, there's no
2318 * need to cache our reply at this time.
2320 printk(KERN_NOTICE
"RPC: Could not send backchannel reply "
2321 "error: %d\n", task
->tk_status
);
2322 xprt_conditional_disconnect(req
->rq_xprt
,
2323 req
->rq_connect_cookie
);
2327 * We were unable to reply and will have to drop the
2328 * request. The server should reconnect and retransmit.
2330 printk(KERN_NOTICE
"RPC: Could not send backchannel reply "
2331 "error: %d\n", task
->tk_status
);
2334 task
->tk_action
= rpc_exit_task
;
2336 #endif /* CONFIG_SUNRPC_BACKCHANNEL */
2339 * 6. Sort out the RPC call status
2342 call_status(struct rpc_task
*task
)
2344 struct rpc_clnt
*clnt
= task
->tk_client
;
2347 if (!task
->tk_msg
.rpc_proc
->p_proc
)
2348 trace_xprt_ping(task
->tk_xprt
, task
->tk_status
);
2350 dprint_status(task
);
2352 status
= task
->tk_status
;
2354 task
->tk_action
= call_decode
;
2358 trace_rpc_call_status(task
);
2359 task
->tk_status
= 0;
2366 if (RPC_IS_SOFTCONN(task
))
2369 * Delay any retries for 3 seconds, then handle as if it
2372 rpc_delay(task
, 3*HZ
);
2380 rpc_force_rebind(clnt
);
2383 rpc_delay(task
, 3*HZ
);
2389 /* shutdown or soft timeout */
2392 if (clnt
->cl_chatty
)
2393 printk("%s: RPC call returned error %d\n",
2394 clnt
->cl_program
->name
, -status
);
2397 task
->tk_action
= call_encode
;
2398 rpc_check_timeout(task
);
2401 rpc_call_rpcerror(task
, status
);
2405 rpc_check_connected(const struct rpc_rqst
*req
)
2407 /* No allocated request or transport? return true */
2408 if (!req
|| !req
->rq_xprt
)
2410 return xprt_connected(req
->rq_xprt
);
2414 rpc_check_timeout(struct rpc_task
*task
)
2416 struct rpc_clnt
*clnt
= task
->tk_client
;
2418 if (xprt_adjust_timeout(task
->tk_rqstp
) == 0)
2421 dprintk("RPC: %5u call_timeout (major)\n", task
->tk_pid
);
2422 task
->tk_timeouts
++;
2424 if (RPC_IS_SOFTCONN(task
) && !rpc_check_connected(task
->tk_rqstp
)) {
2425 rpc_call_rpcerror(task
, -ETIMEDOUT
);
2429 if (RPC_IS_SOFT(task
)) {
2431 * Once a "no retrans timeout" soft tasks (a.k.a NFSv4) has
2432 * been sent, it should time out only if the transport
2433 * connection gets terminally broken.
2435 if ((task
->tk_flags
& RPC_TASK_NO_RETRANS_TIMEOUT
) &&
2436 rpc_check_connected(task
->tk_rqstp
))
2439 if (clnt
->cl_chatty
) {
2440 pr_notice_ratelimited(
2441 "%s: server %s not responding, timed out\n",
2442 clnt
->cl_program
->name
,
2443 task
->tk_xprt
->servername
);
2445 if (task
->tk_flags
& RPC_TASK_TIMEOUT
)
2446 rpc_call_rpcerror(task
, -ETIMEDOUT
);
2448 __rpc_call_rpcerror(task
, -EIO
, -ETIMEDOUT
);
2452 if (!(task
->tk_flags
& RPC_CALL_MAJORSEEN
)) {
2453 task
->tk_flags
|= RPC_CALL_MAJORSEEN
;
2454 if (clnt
->cl_chatty
) {
2455 pr_notice_ratelimited(
2456 "%s: server %s not responding, still trying\n",
2457 clnt
->cl_program
->name
,
2458 task
->tk_xprt
->servername
);
2461 rpc_force_rebind(clnt
);
2463 * Did our request time out due to an RPCSEC_GSS out-of-sequence
2464 * event? RFC2203 requires the server to drop all such requests.
2466 rpcauth_invalcred(task
);
2470 * 7. Decode the RPC reply
2473 call_decode(struct rpc_task
*task
)
2475 struct rpc_clnt
*clnt
= task
->tk_client
;
2476 struct rpc_rqst
*req
= task
->tk_rqstp
;
2477 struct xdr_stream xdr
;
2480 dprint_status(task
);
2482 if (!task
->tk_msg
.rpc_proc
->p_decode
) {
2483 task
->tk_action
= rpc_exit_task
;
2487 if (task
->tk_flags
& RPC_CALL_MAJORSEEN
) {
2488 if (clnt
->cl_chatty
) {
2489 pr_notice_ratelimited("%s: server %s OK\n",
2490 clnt
->cl_program
->name
,
2491 task
->tk_xprt
->servername
);
2493 task
->tk_flags
&= ~RPC_CALL_MAJORSEEN
;
2497 * Ensure that we see all writes made by xprt_complete_rqst()
2498 * before it changed req->rq_reply_bytes_recvd.
2503 * Did we ever call xprt_complete_rqst()? If not, we should assume
2504 * the message is incomplete.
2507 if (!req
->rq_reply_bytes_recvd
)
2510 req
->rq_rcv_buf
.len
= req
->rq_private_buf
.len
;
2512 /* Check that the softirq receive buffer is valid */
2513 WARN_ON(memcmp(&req
->rq_rcv_buf
, &req
->rq_private_buf
,
2514 sizeof(req
->rq_rcv_buf
)) != 0);
2516 xdr_init_decode(&xdr
, &req
->rq_rcv_buf
,
2517 req
->rq_rcv_buf
.head
[0].iov_base
, req
);
2518 err
= rpc_decode_header(task
, &xdr
);
2522 task
->tk_action
= rpc_exit_task
;
2523 task
->tk_status
= rpcauth_unwrap_resp(task
, &xdr
);
2524 dprintk("RPC: %5u %s result %d\n",
2525 task
->tk_pid
, __func__
, task
->tk_status
);
2528 task
->tk_status
= 0;
2529 if (task
->tk_client
->cl_discrtry
)
2530 xprt_conditional_disconnect(req
->rq_xprt
,
2531 req
->rq_connect_cookie
);
2532 task
->tk_action
= call_encode
;
2533 rpc_check_timeout(task
);
2536 task
->tk_action
= call_reserve
;
2537 rpc_check_timeout(task
);
2538 rpcauth_invalcred(task
);
2539 /* Ensure we obtain a new XID if we retry! */
2545 rpc_encode_header(struct rpc_task
*task
, struct xdr_stream
*xdr
)
2547 struct rpc_clnt
*clnt
= task
->tk_client
;
2548 struct rpc_rqst
*req
= task
->tk_rqstp
;
2553 p
= xdr_reserve_space(xdr
, RPC_CALLHDRSIZE
<< 2);
2558 *p
++ = cpu_to_be32(RPC_VERSION
);
2559 *p
++ = cpu_to_be32(clnt
->cl_prog
);
2560 *p
++ = cpu_to_be32(clnt
->cl_vers
);
2561 *p
= cpu_to_be32(task
->tk_msg
.rpc_proc
->p_proc
);
2563 error
= rpcauth_marshcred(task
, xdr
);
2568 trace_rpc_bad_callhdr(task
);
2569 rpc_call_rpcerror(task
, error
);
static int
rpc_decode_header(struct rpc_task *task, struct xdr_stream *xdr)
{
	struct rpc_clnt *clnt = task->tk_client;
	int error;
	__be32 *p;

	/* RFC-1014 says that the representation of XDR data must be a
	 * multiple of four bytes
	 * - if it isn't pointer subtraction in the NFS client may give
	 *   undefined results
	 */
	if (task->tk_rqstp->rq_rcv_buf.len & 3)
		goto out_unparsable;

	p = xdr_inline_decode(xdr, 3 * sizeof(*p));
	if (!p)
		goto out_unparsable;
	p++;	/* skip XID */
	if (*p++ != rpc_reply)
		goto out_unparsable;
	if (*p++ != rpc_msg_accepted)
		goto out_msg_denied;

	error = rpcauth_checkverf(task, xdr);
	if (error)
		goto out_verifier;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (!p)
		goto out_unparsable;
	switch (*p) {
	case rpc_success:
		return 0;
	case rpc_prog_unavail:
		trace_rpc__prog_unavail(task);
		error = -EPFNOSUPPORT;
		goto out_err;
	case rpc_prog_mismatch:
		trace_rpc__prog_mismatch(task);
		error = -EPROTONOSUPPORT;
		goto out_err;
	case rpc_proc_unavail:
		trace_rpc__proc_unavail(task);
		error = -EOPNOTSUPP;
		goto out_err;
	case rpc_garbage_args:
	case rpc_system_err:
		trace_rpc__garbage_args(task);
		error = -EIO;
		break;
	default:
		goto out_unparsable;
	}

out_garbage:
	clnt->cl_stats->rpcgarbage++;
	if (task->tk_garb_retry) {
		task->tk_garb_retry--;
		task->tk_action = call_encode;
		return -EAGAIN;
	}
out_err:
	rpc_call_rpcerror(task, error);
	return error;

out_unparsable:
	trace_rpc__unparsable(task);
	error = -EIO;
	goto out_garbage;

out_verifier:
	trace_rpc_bad_verifier(task);
	goto out_garbage;

out_msg_denied:
	error = -EACCES;
	p = xdr_inline_decode(xdr, sizeof(*p));
	if (!p)
		goto out_unparsable;
	switch (*p++) {
	case rpc_auth_error:
		break;
	case rpc_mismatch:
		trace_rpc__mismatch(task);
		error = -EPROTONOSUPPORT;
		goto out_err;
	default:
		goto out_unparsable;
	}

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (!p)
		goto out_unparsable;
	switch (*p) {
	case rpc_autherr_rejectedcred:
	case rpc_autherr_rejectedverf:
	case rpcsec_gsserr_credproblem:
	case rpcsec_gsserr_ctxproblem:
		if (!task->tk_cred_retry)
			break;
		task->tk_cred_retry--;
		trace_rpc__stale_creds(task);
		return -EKEYREJECTED;
	case rpc_autherr_badcred:
	case rpc_autherr_badverf:
		/* possibly garbled cred/verf? */
		if (!task->tk_garb_retry)
			break;
		task->tk_garb_retry--;
		trace_rpc__bad_creds(task);
		task->tk_action = call_encode;
		return -EAGAIN;
	case rpc_autherr_tooweak:
		trace_rpc__auth_tooweak(task);
		pr_warn("RPC: server %s requires stronger authentication.\n",
			task->tk_xprt->servername);
		break;
	default:
		goto out_unparsable;
	}
	goto out_err;
}
static void rpcproc_encode_null(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
		const void *obj)
{
}

static int rpcproc_decode_null(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
		void *obj)
{
	return 0;
}

static const struct rpc_procinfo rpcproc_null = {
	.p_encode = rpcproc_encode_null,
	.p_decode = rpcproc_decode_null,
};

static int rpc_ping(struct rpc_clnt *clnt)
{
	struct rpc_message msg = {
		.rpc_proc = &rpcproc_null,
	};
	int err;

	err = rpc_call_sync(clnt, &msg, RPC_TASK_SOFT | RPC_TASK_SOFTCONN |
			    RPC_TASK_NULLCREDS);
	return err;
}
static
struct rpc_task *rpc_call_null_helper(struct rpc_clnt *clnt,
		struct rpc_xprt *xprt, struct rpc_cred *cred, int flags,
		const struct rpc_call_ops *ops, void *data)
{
	struct rpc_message msg = {
		.rpc_proc = &rpcproc_null,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clnt,
		.rpc_xprt = xprt,
		.rpc_message = &msg,
		.rpc_op_cred = cred,
		.callback_ops = (ops != NULL) ? ops : &rpc_default_ops,
		.callback_data = data,
		.flags = flags | RPC_TASK_NULLCREDS,
	};

	return rpc_run_task(&task_setup_data);
}

struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred, int flags)
{
	return rpc_call_null_helper(clnt, NULL, cred, flags, NULL, NULL);
}
EXPORT_SYMBOL_GPL(rpc_call_null);
struct rpc_cb_add_xprt_calldata {
	struct rpc_xprt_switch *xps;
	struct rpc_xprt *xprt;
};

static void rpc_cb_add_xprt_done(struct rpc_task *task, void *calldata)
{
	struct rpc_cb_add_xprt_calldata *data = calldata;

	if (task->tk_status == 0)
		rpc_xprt_switch_add_xprt(data->xps, data->xprt);
}

static void rpc_cb_add_xprt_release(void *calldata)
{
	struct rpc_cb_add_xprt_calldata *data = calldata;

	xprt_put(data->xprt);
	xprt_switch_put(data->xps);
	kfree(data);
}

static const struct rpc_call_ops rpc_cb_add_xprt_call_ops = {
	.rpc_call_done = rpc_cb_add_xprt_done,
	.rpc_release = rpc_cb_add_xprt_release,
};
/**
 * rpc_clnt_test_and_add_xprt - Test and add a new transport to a rpc_clnt
 * @clnt: pointer to struct rpc_clnt
 * @xps: pointer to struct rpc_xprt_switch
 * @xprt: pointer to struct rpc_xprt
 * @dummy: unused
 *
 * (a usage sketch follows the function below)
 */
int rpc_clnt_test_and_add_xprt(struct rpc_clnt *clnt,
		struct rpc_xprt_switch *xps, struct rpc_xprt *xprt,
		void *dummy)
{
	struct rpc_cb_add_xprt_calldata *data;
	struct rpc_task *task;

	data = kmalloc(sizeof(*data), GFP_NOFS);
	if (!data)
		return -ENOMEM;
	data->xps = xprt_switch_get(xps);
	data->xprt = xprt_get(xprt);
	if (rpc_xprt_switch_has_addr(data->xps, (struct sockaddr *)&xprt->addr)) {
		rpc_cb_add_xprt_release(data);
		goto success;
	}

	task = rpc_call_null_helper(clnt, xprt, NULL,
			RPC_TASK_SOFT|RPC_TASK_SOFTCONN|RPC_TASK_ASYNC|RPC_TASK_NULLCREDS,
			&rpc_cb_add_xprt_call_ops, data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
success:
	return 1;
}
EXPORT_SYMBOL_GPL(rpc_clnt_test_and_add_xprt);
/**
 * rpc_clnt_setup_test_and_add_xprt()
 *
 * This is an rpc_clnt_add_xprt setup() function which returns 1 so:
 *   1) caller of the test function must dereference the rpc_xprt_switch
 *   and the rpc_xprt.
 *   2) test function must call rpc_xprt_switch_add_xprt, usually in
 *   the rpc_call_done routine.
 *
 * Upon success (return of 1), the test function adds the new
 * transport to the rpc_clnt xprt switch.
 *
 * @clnt: struct rpc_clnt to get the new transport
 * @xps:  the rpc_xprt_switch to hold the new transport
 * @xprt: the rpc_xprt to test
 * @data: a struct rpc_add_xprt_test pointer that holds the test function
 *        and test function call data
 *
 * (a usage sketch follows the function below)
 */
int rpc_clnt_setup_test_and_add_xprt(struct rpc_clnt *clnt,
				     struct rpc_xprt_switch *xps,
				     struct rpc_xprt *xprt,
				     void *data)
{
	struct rpc_task *task;
	struct rpc_add_xprt_test *xtest = (struct rpc_add_xprt_test *)data;
	int status = -EADDRINUSE;

	xprt = xprt_get(xprt);
	xprt_switch_get(xps);

	if (rpc_xprt_switch_has_addr(xps, (struct sockaddr *)&xprt->addr))
		goto out_err;

	/* Test the connection */
	task = rpc_call_null_helper(clnt, xprt, NULL,
				    RPC_TASK_SOFT | RPC_TASK_SOFTCONN | RPC_TASK_NULLCREDS,
				    NULL, NULL);
	if (IS_ERR(task)) {
		status = PTR_ERR(task);
		goto out_err;
	}
	status = task->tk_status;
	rpc_put_task(task);

	if (status < 0)
		goto out_err;

	/* rpc_xprt_switch and rpc_xprt are dereferenced by add_xprt_test() */
	xtest->add_xprt_test(clnt, xprt, xtest->data);

	xprt_put(xprt);
	xprt_switch_put(xps);

	/* so that rpc_clnt_add_xprt does not call rpc_xprt_switch_add_xprt */
	return 1;
out_err:
	xprt_put(xprt);
	xprt_switch_put(xps);
	pr_info("RPC: rpc_clnt_test_xprt failed: %d addr %s not added\n",
		status, xprt->address_strings[RPC_DISPLAY_ADDR]);
	return status;
}
EXPORT_SYMBOL_GPL(rpc_clnt_setup_test_and_add_xprt);
/**
 * rpc_clnt_add_xprt - Add a new transport to a rpc_clnt
 * @clnt: pointer to struct rpc_clnt
 * @xprtargs: pointer to struct xprt_create
 * @setup: callback to test and/or set up the connection
 * @data: pointer to setup function data
 *
 * Creates a new transport using the parameters set in @xprtargs and adds
 * it to @clnt. If a @setup callback is provided, it is invoked to test
 * connectivity before the new transport is added.
 *
 * (a usage sketch follows the function below)
 */
int rpc_clnt_add_xprt(struct rpc_clnt *clnt,
		struct xprt_create *xprtargs,
		int (*setup)(struct rpc_clnt *,
			struct rpc_xprt_switch *,
			struct rpc_xprt *,
			void *),
		void *data)
{
	struct rpc_xprt_switch *xps;
	struct rpc_xprt *xprt;
	unsigned long connect_timeout;
	unsigned long reconnect_timeout;
	unsigned char resvport, reuseport;
	int ret = 0;

	rcu_read_lock();
	xps = xprt_switch_get(rcu_dereference(clnt->cl_xpi.xpi_xpswitch));
	xprt = xprt_iter_xprt(&clnt->cl_xpi);
	if (xps == NULL || xprt == NULL) {
		rcu_read_unlock();
		xprt_switch_put(xps);
		return -EAGAIN;
	}
	resvport = xprt->resvport;
	reuseport = xprt->reuseport;
	connect_timeout = xprt->connect_timeout;
	reconnect_timeout = xprt->max_reconnect_timeout;
	rcu_read_unlock();

	xprt = xprt_create_transport(xprtargs);
	if (IS_ERR(xprt)) {
		ret = PTR_ERR(xprt);
		goto out_put_switch;
	}
	xprt->resvport = resvport;
	xprt->reuseport = reuseport;
	if (xprt->ops->set_connect_timeout != NULL)
		xprt->ops->set_connect_timeout(xprt,
				connect_timeout,
				reconnect_timeout);

	rpc_xprt_switch_set_roundrobin(xps);
	if (setup) {
		ret = setup(clnt, xps, xprt, data);
		if (ret != 0)
			goto out_put_xprt;
	}
	rpc_xprt_switch_add_xprt(xps, xprt);
out_put_xprt:
	xprt_put(xprt);
out_put_switch:
	xprt_switch_put(xps);
	return ret;
}
EXPORT_SYMBOL_GPL(rpc_clnt_add_xprt);
struct connect_timeout_data {
	unsigned long connect_timeout;
	unsigned long reconnect_timeout;
};

static int
rpc_xprt_set_connect_timeout(struct rpc_clnt *clnt,
		struct rpc_xprt *xprt,
		void *data)
{
	struct connect_timeout_data *timeo = data;

	if (xprt->ops->set_connect_timeout)
		xprt->ops->set_connect_timeout(xprt,
				timeo->connect_timeout,
				timeo->reconnect_timeout);
	return 0;
}

void
rpc_set_connect_timeout(struct rpc_clnt *clnt,
		unsigned long connect_timeout,
		unsigned long reconnect_timeout)
{
	struct connect_timeout_data timeout = {
		.connect_timeout = connect_timeout,
		.reconnect_timeout = reconnect_timeout,
	};
	rpc_clnt_iterate_for_each_xprt(clnt,
			rpc_xprt_set_connect_timeout,
			&timeout);
}
EXPORT_SYMBOL_GPL(rpc_set_connect_timeout);
void rpc_clnt_xprt_switch_put(struct rpc_clnt *clnt)
{
	rcu_read_lock();
	xprt_switch_put(rcu_dereference(clnt->cl_xpi.xpi_xpswitch));
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_put);

void rpc_clnt_xprt_switch_add_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt)
{
	rcu_read_lock();
	rpc_xprt_switch_add_xprt(rcu_dereference(clnt->cl_xpi.xpi_xpswitch),
				 xprt);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_add_xprt);

bool rpc_clnt_xprt_switch_has_addr(struct rpc_clnt *clnt,
				   const struct sockaddr *sap)
{
	struct rpc_xprt_switch *xps;
	bool ret;

	rcu_read_lock();
	xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch);
	ret = rpc_xprt_switch_has_addr(xps, sap);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_has_addr);
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
static void rpc_show_header(void)
{
	printk(KERN_INFO "-pid- flgs status -client- --rqstp- "
		"-timeout ---ops--\n");
}

static void rpc_show_task(const struct rpc_clnt *clnt,
			  const struct rpc_task *task)
{
	const char *rpc_waitq = "none";

	if (RPC_IS_QUEUED(task))
		rpc_waitq = rpc_qname(task->tk_waitqueue);

	printk(KERN_INFO "%5u %04x %6d %8p %8p %8ld %8p %sv%u %s a:%ps q:%s\n",
		task->tk_pid, task->tk_flags, task->tk_status,
		clnt, task->tk_rqstp, rpc_task_timeout(task), task->tk_ops,
		clnt->cl_program->name, clnt->cl_vers, rpc_proc_name(task),
		task->tk_action, rpc_waitq);
}

void rpc_show_tasks(struct net *net)
{
	struct rpc_clnt *clnt;
	struct rpc_task *task;
	int header = 0;
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

	spin_lock(&sn->rpc_client_lock);
	list_for_each_entry(clnt, &sn->all_clients, cl_clients) {
		spin_lock(&clnt->cl_lock);
		list_for_each_entry(task, &clnt->cl_tasks, tk_task) {
			if (!header) {
				rpc_show_header();
				header++;
			}
			rpc_show_task(clnt, task);
		}
		spin_unlock(&clnt->cl_lock);
	}
	spin_unlock(&sn->rpc_client_lock);
}
#endif
#if IS_ENABLED(CONFIG_SUNRPC_SWAP)
static int
rpc_clnt_swap_activate_callback(struct rpc_clnt *clnt,
		struct rpc_xprt *xprt,
		void *dummy)
{
	return xprt_enable_swap(xprt);
}

int
rpc_clnt_swap_activate(struct rpc_clnt *clnt)
{
	if (atomic_inc_return(&clnt->cl_swapper) == 1)
		return rpc_clnt_iterate_for_each_xprt(clnt,
				rpc_clnt_swap_activate_callback, NULL);
	return 0;
}
EXPORT_SYMBOL_GPL(rpc_clnt_swap_activate);

static int
rpc_clnt_swap_deactivate_callback(struct rpc_clnt *clnt,
		struct rpc_xprt *xprt,
		void *dummy)
{
	xprt_disable_swap(xprt);
	return 0;
}

void
rpc_clnt_swap_deactivate(struct rpc_clnt *clnt)
{
	if (atomic_dec_if_positive(&clnt->cl_swapper) == 0)
		rpc_clnt_iterate_for_each_xprt(clnt,
				rpc_clnt_swap_deactivate_callback, NULL);
}
EXPORT_SYMBOL_GPL(rpc_clnt_swap_deactivate);
#endif /* CONFIG_SUNRPC_SWAP */