// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/net/sunrpc/clnt.c
 *
 *  This file contains the high-level RPC interface.
 *  It is modeled as a finite state machine to support both synchronous
 *  and asynchronous requests.
 *
 *  -	RPC header generation and argument serialization.
 *  -	Credential refresh.
 *  -	TCP connect handling.
 *  -	Retry of operation when it is suspected the operation failed because
 *	of uid squashing on the server, or when the credentials were stale
 *	and need to be refreshed, or when a packet was damaged in transit.
 *	This may have to be moved to the VFS layer.
 *
 *  Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com>
 *  Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de>
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kallsyms.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/utsname.h>
#include <linux/workqueue.h>
#include <linux/in6.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include <linux/sunrpc/metrics.h>
#include <linux/sunrpc/bc_xprt.h>
#include <trace/events/sunrpc.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_CALL
#endif

#define dprint_status(t)					\
	dprintk("RPC: %5u %s (status %d)\n", t->tk_pid,		\
			__func__, t->tk_status)

/*
 * All RPC clients are linked into this list
 */

static DECLARE_WAIT_QUEUE_HEAD(destroy_wait);
static void call_start(struct rpc_task *task);
static void call_reserve(struct rpc_task *task);
static void call_reserveresult(struct rpc_task *task);
static void call_allocate(struct rpc_task *task);
static void call_encode(struct rpc_task *task);
static void call_decode(struct rpc_task *task);
static void call_bind(struct rpc_task *task);
static void call_bind_status(struct rpc_task *task);
static void call_transmit(struct rpc_task *task);
static void call_status(struct rpc_task *task);
static void call_transmit_status(struct rpc_task *task);
static void call_refresh(struct rpc_task *task);
static void call_refreshresult(struct rpc_task *task);
static void call_connect(struct rpc_task *task);
static void call_connect_status(struct rpc_task *task);

static int rpc_encode_header(struct rpc_task *task,
			     struct xdr_stream *xdr);
static int rpc_decode_header(struct rpc_task *task,
			     struct xdr_stream *xdr);
static int rpc_ping(struct rpc_clnt *clnt);
static void rpc_check_timeout(struct rpc_task *task);
static void rpc_register_client(struct rpc_clnt *clnt)
{
	struct net *net = rpc_net_ns(clnt);
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

	spin_lock(&sn->rpc_client_lock);
	list_add(&clnt->cl_clients, &sn->all_clients);
	spin_unlock(&sn->rpc_client_lock);
}

static void rpc_unregister_client(struct rpc_clnt *clnt)
{
	struct net *net = rpc_net_ns(clnt);
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

	spin_lock(&sn->rpc_client_lock);
	list_del(&clnt->cl_clients);
	spin_unlock(&sn->rpc_client_lock);
}

static void __rpc_clnt_remove_pipedir(struct rpc_clnt *clnt)
{
	rpc_remove_client_dir(clnt);
}

static void rpc_clnt_remove_pipedir(struct rpc_clnt *clnt)
{
	struct net *net = rpc_net_ns(clnt);
	struct super_block *pipefs_sb;

	pipefs_sb = rpc_get_sb_net(net);
	__rpc_clnt_remove_pipedir(clnt);
}
static struct dentry *rpc_setup_pipedir_sb(struct super_block *sb,
		struct rpc_clnt *clnt)
{
	static uint32_t clntid;
	const char *dir_name = clnt->cl_program->pipe_dir_name;
	struct dentry *dir, *dentry;

	dir = rpc_d_lookup_sb(sb, dir_name);
	pr_info("RPC: pipefs directory doesn't exist: %s\n", dir_name);
	snprintf(name, sizeof(name), "clnt%x", (unsigned int)clntid++);
	name[sizeof(name) - 1] = '\0';
	dentry = rpc_create_client_dir(dir, name, clnt);
	if (dentry == ERR_PTR(-EEXIST))
	printk(KERN_INFO "RPC: Couldn't create pipefs entry"
			" %s/%s, error %ld\n",
			dir_name, name, PTR_ERR(dentry));
}

static int
rpc_setup_pipedir(struct super_block *pipefs_sb, struct rpc_clnt *clnt)
{
	struct dentry *dentry;

	if (clnt->cl_program->pipe_dir_name != NULL) {
		dentry = rpc_setup_pipedir_sb(pipefs_sb, clnt);
		return PTR_ERR(dentry);
	}
}
static int rpc_clnt_skip_event(struct rpc_clnt *clnt, unsigned long event)
{
	if (clnt->cl_program->pipe_dir_name == NULL)
	case RPC_PIPEFS_MOUNT:
		if (clnt->cl_pipedir_objects.pdh_dentry != NULL)
		if (atomic_read(&clnt->cl_count) == 0)
	case RPC_PIPEFS_UMOUNT:
		if (clnt->cl_pipedir_objects.pdh_dentry == NULL)
}

static int __rpc_clnt_handle_event(struct rpc_clnt *clnt, unsigned long event,
				   struct super_block *sb)
{
	struct dentry *dentry;

	case RPC_PIPEFS_MOUNT:
		dentry = rpc_setup_pipedir_sb(sb, clnt);
			return PTR_ERR(dentry);
	case RPC_PIPEFS_UMOUNT:
		__rpc_clnt_remove_pipedir(clnt);
	printk(KERN_ERR "%s: unknown event: %ld\n", __func__, event);
}

static int __rpc_pipefs_event(struct rpc_clnt *clnt, unsigned long event,
			      struct super_block *sb)
{
	for (;; clnt = clnt->cl_parent) {
		if (!rpc_clnt_skip_event(clnt, event))
			error = __rpc_clnt_handle_event(clnt, event, sb);
		if (error || clnt == clnt->cl_parent)
	}
}
static struct rpc_clnt *rpc_get_client_for_event(struct net *net, int event)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	struct rpc_clnt *clnt;

	spin_lock(&sn->rpc_client_lock);
	list_for_each_entry(clnt, &sn->all_clients, cl_clients) {
		if (rpc_clnt_skip_event(clnt, event))
		spin_unlock(&sn->rpc_client_lock);
	}
	spin_unlock(&sn->rpc_client_lock);
}

static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event,
			    void *ptr)
{
	struct super_block *sb = ptr;
	struct rpc_clnt *clnt;

	while ((clnt = rpc_get_client_for_event(sb->s_fs_info, event))) {
		error = __rpc_pipefs_event(clnt, event, sb);
	}
}

static struct notifier_block rpc_clients_block = {
	.notifier_call	= rpc_pipefs_event,
	.priority	= SUNRPC_PIPEFS_RPC_PRIO,
};

int rpc_clients_notifier_register(void)
{
	return rpc_pipefs_notifier_register(&rpc_clients_block);
}

void rpc_clients_notifier_unregister(void)
{
	return rpc_pipefs_notifier_unregister(&rpc_clients_block);
}
static struct rpc_xprt *rpc_clnt_set_transport(struct rpc_clnt *clnt,
		struct rpc_xprt *xprt,
		const struct rpc_timeout *timeout)
{
	struct rpc_xprt *old;

	spin_lock(&clnt->cl_lock);
	old = rcu_dereference_protected(clnt->cl_xprt,
			lockdep_is_held(&clnt->cl_lock));

	if (!xprt_bound(xprt))
		clnt->cl_autobind = 1;

	clnt->cl_timeout = timeout;
	rcu_assign_pointer(clnt->cl_xprt, xprt);
	spin_unlock(&clnt->cl_lock);
}

static void rpc_clnt_set_nodename(struct rpc_clnt *clnt, const char *nodename)
{
	clnt->cl_nodelen = strlcpy(clnt->cl_nodename,
			nodename, sizeof(clnt->cl_nodename));
}
static int rpc_client_register(struct rpc_clnt *clnt,
			       rpc_authflavor_t pseudoflavor,
			       const char *client_name)
{
	struct rpc_auth_create_args auth_args = {
		.pseudoflavor = pseudoflavor,
		.target_name = client_name,
	};
	struct rpc_auth *auth;
	struct net *net = rpc_net_ns(clnt);
	struct super_block *pipefs_sb;

	rpc_clnt_debugfs_register(clnt);

	pipefs_sb = rpc_get_sb_net(net);
	err = rpc_setup_pipedir(pipefs_sb, clnt);

	rpc_register_client(clnt);

	auth = rpcauth_create(&auth_args, clnt);
	dprintk("RPC: Couldn't create auth handle (flavor %u)\n",

	pipefs_sb = rpc_get_sb_net(net);
	rpc_unregister_client(clnt);
	__rpc_clnt_remove_pipedir(clnt);
	rpc_clnt_debugfs_unregister(clnt);
}

static DEFINE_IDA(rpc_clids);

void rpc_cleanup_clids(void)
{
	ida_destroy(&rpc_clids);
}

static int rpc_alloc_clid(struct rpc_clnt *clnt)
{
	clid = ida_simple_get(&rpc_clids, 0, 0, GFP_KERNEL);
	clnt->cl_clid = clid;
}

static void rpc_free_clid(struct rpc_clnt *clnt)
{
	ida_simple_remove(&rpc_clids, clnt->cl_clid);
}
static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args,
		struct rpc_xprt_switch *xps,
		struct rpc_xprt *xprt,
		struct rpc_clnt *parent)
{
	const struct rpc_program *program = args->program;
	const struct rpc_version *version;
	struct rpc_clnt *clnt = NULL;
	const struct rpc_timeout *timeout;
	const char *nodename = args->nodename;

	/* sanity check the name before trying to print it */
	dprintk("RPC: creating %s client for %s (xprt %p)\n",
			program->name, args->servername, xprt);

	if (args->version >= program->nrvers)
	version = program->version[args->version];

	clnt = kzalloc(sizeof(*clnt), GFP_KERNEL);
	clnt->cl_parent = parent ? : clnt;

	err = rpc_alloc_clid(clnt);

	clnt->cl_cred = get_cred(args->cred);
	clnt->cl_procinfo = version->procs;
	clnt->cl_maxproc = version->nrprocs;
	clnt->cl_prog = args->prognumber ? : program->number;
	clnt->cl_vers = version->number;
	clnt->cl_stats = program->stats;
	clnt->cl_metrics = rpc_alloc_iostats(clnt);
	rpc_init_pipe_dir_head(&clnt->cl_pipedir_objects);
	if (clnt->cl_metrics == NULL)
	clnt->cl_program = program;
	INIT_LIST_HEAD(&clnt->cl_tasks);
	spin_lock_init(&clnt->cl_lock);

	timeout = xprt->timeout;
	if (args->timeout != NULL) {
		memcpy(&clnt->cl_timeout_default, args->timeout,
				sizeof(clnt->cl_timeout_default));
		timeout = &clnt->cl_timeout_default;
	}

	rpc_clnt_set_transport(clnt, xprt, timeout);
	xprt_iter_init(&clnt->cl_xpi, xps);
	xprt_switch_put(xps);

	clnt->cl_rtt = &clnt->cl_rtt_default;
	rpc_init_rtt(&clnt->cl_rtt_default, clnt->cl_timeout->to_initval);

	atomic_set(&clnt->cl_count, 1);

	if (nodename == NULL)
		nodename = utsname()->nodename;
	/* save the nodename */
	rpc_clnt_set_nodename(clnt, nodename);

	err = rpc_client_register(clnt, args->authflavor, args->client_name);
	atomic_inc(&parent->cl_count);

	rpc_free_iostats(clnt->cl_metrics);
	put_cred(clnt->cl_cred);
	xprt_switch_put(xps);
}
static struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
					struct rpc_xprt *xprt)
{
	struct rpc_clnt *clnt = NULL;
	struct rpc_xprt_switch *xps;

	if (args->bc_xprt && args->bc_xprt->xpt_bc_xps) {
		WARN_ON_ONCE(!(args->protocol & XPRT_TRANSPORT_BC));
		xps = args->bc_xprt->xpt_bc_xps;
		xprt_switch_get(xps);
		xps = xprt_switch_alloc(xprt, GFP_KERNEL);
			return ERR_PTR(-ENOMEM);
		xprt_switch_get(xps);
		xprt->bc_xprt->xpt_bc_xps = xps;
	}

	clnt = rpc_new_client(args, xps, xprt, NULL);

	if (!(args->flags & RPC_CLNT_CREATE_NOPING)) {
		int err = rpc_ping(clnt);
			rpc_shutdown_client(clnt);
	}

	clnt->cl_softrtry = 1;
	if (args->flags & (RPC_CLNT_CREATE_HARDRTRY|RPC_CLNT_CREATE_SOFTERR)) {
		clnt->cl_softrtry = 0;
		if (args->flags & RPC_CLNT_CREATE_SOFTERR)
			clnt->cl_softerr = 1;
	}

	if (args->flags & RPC_CLNT_CREATE_AUTOBIND)
		clnt->cl_autobind = 1;
	if (args->flags & RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT)
		clnt->cl_noretranstimeo = 1;
	if (args->flags & RPC_CLNT_CREATE_DISCRTRY)
		clnt->cl_discrtry = 1;
	if (!(args->flags & RPC_CLNT_CREATE_QUIET))
}

/**
 * rpc_create - create an RPC client and transport with one call
 * @args: rpc_clnt create argument structure
 *
 * Creates and initializes an RPC transport and an RPC client.
 *
 * It can ping the server in order to determine if it is up, and to see if
 * it supports this program and version.  RPC_CLNT_CREATE_NOPING disables
 * this behavior so asynchronous tasks can also use rpc_create.
 */
struct rpc_clnt *rpc_create(struct rpc_create_args *args)
{
	struct rpc_xprt *xprt;
	struct xprt_create xprtargs = {
		.ident		= args->protocol,
		.srcaddr	= args->saddress,
		.dstaddr	= args->address,
		.addrlen	= args->addrsize,
		.servername	= args->servername,
		.bc_xprt	= args->bc_xprt,
	};
	struct rpc_clnt *clnt;

	WARN_ON_ONCE(!(args->protocol & XPRT_TRANSPORT_BC));
	xprt = args->bc_xprt->xpt_bc_xprt;
		return rpc_create_xprt(args, xprt);

	if (args->flags & RPC_CLNT_CREATE_INFINITE_SLOTS)
		xprtargs.flags |= XPRT_CREATE_INFINITE_SLOTS;
	if (args->flags & RPC_CLNT_CREATE_NO_IDLE_TIMEOUT)
		xprtargs.flags |= XPRT_CREATE_NO_IDLE_TIMEOUT;

	/*
	 * If the caller chooses not to specify a hostname, whip
	 * up a string representation of the passed-in address.
	 */
	if (xprtargs.servername == NULL) {
		struct sockaddr_un *sun =
				(struct sockaddr_un *)args->address;
		struct sockaddr_in *sin =
				(struct sockaddr_in *)args->address;
		struct sockaddr_in6 *sin6 =
				(struct sockaddr_in6 *)args->address;

		servername[0] = '\0';
		switch (args->address->sa_family) {
			snprintf(servername, sizeof(servername), "%s",
			snprintf(servername, sizeof(servername), "%pI4",
					&sin->sin_addr.s_addr);
			snprintf(servername, sizeof(servername), "%pI6",
			/* caller wants default server name, but
			 * address family isn't recognized. */
			return ERR_PTR(-EINVAL);
		}
		xprtargs.servername = servername;
	}

	xprt = xprt_create_transport(&xprtargs);
		return (struct rpc_clnt *)xprt;

	/*
	 * By default, kernel RPC client connects from a reserved port.
	 * CAP_NET_BIND_SERVICE will not be set for unprivileged requesters,
	 * but it is always enabled for rpciod, which handles the connect
	 */
	if (args->flags & RPC_CLNT_CREATE_NONPRIVPORT)
	if (args->flags & RPC_CLNT_CREATE_REUSEPORT)

	clnt = rpc_create_xprt(args, xprt);
	if (IS_ERR(clnt) || args->nconnect <= 1)

	for (i = 0; i < args->nconnect - 1; i++) {
		if (rpc_clnt_add_xprt(clnt, &xprtargs, NULL, NULL) < 0)
	}
}
EXPORT_SYMBOL_GPL(rpc_create);
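
/*
 * Minimal usage sketch (not part of the original file): a caller typically
 * fills in a struct rpc_create_args and checks the result with IS_ERR().
 * The values below are illustrative only; "my_rpc_program" is a placeholder.
 *
 *	struct rpc_create_args cargs = {
 *		.net		= net,
 *		.protocol	= XPRT_TRANSPORT_TCP,
 *		.address	= (struct sockaddr *)&addr,
 *		.addrsize	= sizeof(addr),
 *		.servername	= "server.example.org",
 *		.program	= &my_rpc_program,
 *		.version	= 1,
 *		.authflavor	= RPC_AUTH_UNIX,
 *	};
 *	struct rpc_clnt *clnt = rpc_create(&cargs);
 *
 *	if (IS_ERR(clnt))
 *		return PTR_ERR(clnt);
 */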
/*
 * This function clones the RPC client structure. It allows us to share the
 * same transport while varying parameters such as the authentication
 */
static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args,
					   struct rpc_clnt *clnt)
{
	struct rpc_xprt_switch *xps;
	struct rpc_xprt *xprt;
	struct rpc_clnt *new;

	xprt = xprt_get(rcu_dereference(clnt->cl_xprt));
	xps = xprt_switch_get(rcu_dereference(clnt->cl_xpi.xpi_xpswitch));
	if (xprt == NULL || xps == NULL) {
		xprt_switch_put(xps);
	}
	args->servername = xprt->servername;
	args->nodename = clnt->cl_nodename;

	new = rpc_new_client(args, xps, xprt, clnt);

	/* Turn off autobind on clones */
	new->cl_autobind = 0;
	new->cl_softrtry = clnt->cl_softrtry;
	new->cl_softerr = clnt->cl_softerr;
	new->cl_noretranstimeo = clnt->cl_noretranstimeo;
	new->cl_discrtry = clnt->cl_discrtry;
	new->cl_chatty = clnt->cl_chatty;
	new->cl_principal = clnt->cl_principal;

	dprintk("RPC: %s: returned error %d\n", __func__, err);
}
/**
 * rpc_clone_client - Clone an RPC client structure
 *
 * @clnt: RPC client whose parameters are copied
 *
 * Returns a fresh RPC client or an ERR_PTR.
 */
struct rpc_clnt *rpc_clone_client(struct rpc_clnt *clnt)
{
	struct rpc_create_args args = {
		.program	= clnt->cl_program,
		.prognumber	= clnt->cl_prog,
		.version	= clnt->cl_vers,
		.authflavor	= clnt->cl_auth->au_flavor,
		.cred		= clnt->cl_cred,
	};
	return __rpc_clone_client(&args, clnt);
}
EXPORT_SYMBOL_GPL(rpc_clone_client);
/**
 * rpc_clone_client_set_auth - Clone an RPC client structure and set its auth
 *
 * @clnt: RPC client whose parameters are copied
 * @flavor: security flavor for new client
 *
 * Returns a fresh RPC client or an ERR_PTR.
 */
struct rpc_clnt *
rpc_clone_client_set_auth(struct rpc_clnt *clnt, rpc_authflavor_t flavor)
{
	struct rpc_create_args args = {
		.program	= clnt->cl_program,
		.prognumber	= clnt->cl_prog,
		.version	= clnt->cl_vers,
		.authflavor	= flavor,
		.cred		= clnt->cl_cred,
	};
	return __rpc_clone_client(&args, clnt);
}
EXPORT_SYMBOL_GPL(rpc_clone_client_set_auth);
/**
 * rpc_switch_client_transport: switch the RPC transport on the fly
 * @clnt: pointer to a struct rpc_clnt
 * @args: pointer to the new transport arguments
 * @timeout: pointer to the new timeout parameters
 *
 * This function allows the caller to switch the RPC transport for the
 * rpc_clnt structure 'clnt' to allow it to connect to a mirrored NFS
 * server, for instance.  It assumes that the caller has ensured that
 * there are no active RPC tasks by using some form of locking.
 *
 * Returns zero if "clnt" is now using the new xprt.  Otherwise a
 * negative errno is returned, and "clnt" continues to use the old
 * xprt.
 */
int rpc_switch_client_transport(struct rpc_clnt *clnt,
		struct xprt_create *args,
		const struct rpc_timeout *timeout)
{
	const struct rpc_timeout *old_timeo;
	rpc_authflavor_t pseudoflavor;
	struct rpc_xprt_switch *xps, *oldxps;
	struct rpc_xprt *xprt, *old;
	struct rpc_clnt *parent;

	xprt = xprt_create_transport(args);
		dprintk("RPC: failed to create new xprt for clnt %p\n",
		return PTR_ERR(xprt);

	xps = xprt_switch_alloc(xprt, GFP_KERNEL);

	pseudoflavor = clnt->cl_auth->au_flavor;

	old_timeo = clnt->cl_timeout;
	old = rpc_clnt_set_transport(clnt, xprt, timeout);
	oldxps = xprt_iter_xchg_switch(&clnt->cl_xpi, xps);

	rpc_unregister_client(clnt);
	__rpc_clnt_remove_pipedir(clnt);
	rpc_clnt_debugfs_unregister(clnt);

	/*
	 * A new transport was created.  "clnt" therefore
	 * becomes the root of a new cl_parent tree.  clnt's
	 * children, if it has any, still point to the old xprt.
	 */
	parent = clnt->cl_parent;
	clnt->cl_parent = clnt;

	/*
	 * The old rpc_auth cache cannot be re-used.  GSS
	 * contexts in particular are between a single
	 */
	err = rpc_client_register(clnt, pseudoflavor, NULL);

	rpc_release_client(parent);
	xprt_switch_put(oldxps);

	dprintk("RPC: replaced xprt for clnt %p\n", clnt);

	xps = xprt_iter_xchg_switch(&clnt->cl_xpi, oldxps);
	rpc_clnt_set_transport(clnt, old, old_timeo);
	clnt->cl_parent = parent;
	rpc_client_register(clnt, pseudoflavor, NULL);
	xprt_switch_put(xps);
	dprintk("RPC: failed to switch xprt for clnt %p\n", clnt);
}
EXPORT_SYMBOL_GPL(rpc_switch_client_transport);
int rpc_clnt_xprt_iter_init(struct rpc_clnt *clnt, struct rpc_xprt_iter *xpi)
{
	struct rpc_xprt_switch *xps;

	xps = xprt_switch_get(rcu_dereference(clnt->cl_xpi.xpi_xpswitch));
	xprt_iter_init_listall(xpi, xps);
	xprt_switch_put(xps);
}

/**
 * rpc_clnt_iterate_for_each_xprt - Apply a function to all transports
 * @clnt: pointer to client
 * @fn: function to apply
 * @data: void pointer to function data
 *
 * Iterates through the list of RPC transports currently attached to the
 * client and applies the function fn(clnt, xprt, data).
 *
 * On error, the iteration stops, and the function returns the error value.
 */
int rpc_clnt_iterate_for_each_xprt(struct rpc_clnt *clnt,
		int (*fn)(struct rpc_clnt *, struct rpc_xprt *, void *),
		void *data)
{
	struct rpc_xprt_iter xpi;

	ret = rpc_clnt_xprt_iter_init(clnt, &xpi);
		struct rpc_xprt *xprt = xprt_iter_get_next(&xpi);

		ret = fn(clnt, xprt, data);
	xprt_iter_destroy(&xpi);
}
EXPORT_SYMBOL_GPL(rpc_clnt_iterate_for_each_xprt);
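
/*
 * Usage sketch (illustrative, not from the original file): the callback
 * has the signature int (*)(struct rpc_clnt *, struct rpc_xprt *, void *)
 * and a non-zero return value stops the iteration.
 *
 *	static int count_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt,
 *			      void *data)
 *	{
 *		(*(unsigned int *)data)++;
 *		return 0;
 *	}
 *
 *	unsigned int nr = 0;
 *	rpc_clnt_iterate_for_each_xprt(clnt, count_xprt, &nr);
 */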
/*
 * Kill all tasks for the given client.
 * XXX: kill their descendants as well?
 */
void rpc_killall_tasks(struct rpc_clnt *clnt)
{
	struct rpc_task *rovr;

	if (list_empty(&clnt->cl_tasks))
	dprintk("RPC: killing all tasks for client %p\n", clnt);
	/*
	 * Spin lock all_tasks to prevent changes...
	 */
	spin_lock(&clnt->cl_lock);
	list_for_each_entry(rovr, &clnt->cl_tasks, tk_task)
		rpc_signal_task(rovr);
	spin_unlock(&clnt->cl_lock);
}
EXPORT_SYMBOL_GPL(rpc_killall_tasks);
/*
 * Properly shut down an RPC client, terminating all outstanding
 * RPC calls.
 */
void rpc_shutdown_client(struct rpc_clnt *clnt)
{
	dprintk_rcu("RPC: shutting down %s client for %s\n",
			clnt->cl_program->name,
			rcu_dereference(clnt->cl_xprt)->servername);

	while (!list_empty(&clnt->cl_tasks)) {
		rpc_killall_tasks(clnt);
		wait_event_timeout(destroy_wait,
			list_empty(&clnt->cl_tasks), 1*HZ);
	}

	rpc_release_client(clnt);
}
EXPORT_SYMBOL_GPL(rpc_shutdown_client);
static struct rpc_clnt *
rpc_free_client(struct rpc_clnt *clnt)
{
	struct rpc_clnt *parent = NULL;

	dprintk_rcu("RPC: destroying %s client for %s\n",
			clnt->cl_program->name,
			rcu_dereference(clnt->cl_xprt)->servername);
	if (clnt->cl_parent != clnt)
		parent = clnt->cl_parent;
	rpc_clnt_debugfs_unregister(clnt);
	rpc_clnt_remove_pipedir(clnt);
	rpc_unregister_client(clnt);
	rpc_free_iostats(clnt->cl_metrics);
	clnt->cl_metrics = NULL;
	xprt_put(rcu_dereference_raw(clnt->cl_xprt));
	xprt_iter_destroy(&clnt->cl_xpi);
	put_cred(clnt->cl_cred);
}

static struct rpc_clnt *
rpc_free_auth(struct rpc_clnt *clnt)
{
	if (clnt->cl_auth == NULL)
		return rpc_free_client(clnt);

	/*
	 * Note: RPCSEC_GSS may need to send NULL RPC calls in order to
	 *       release remaining GSS contexts. This mechanism ensures
	 *       that it can do so safely.
	 */
	atomic_inc(&clnt->cl_count);
	rpcauth_release(clnt->cl_auth);
	clnt->cl_auth = NULL;
	if (atomic_dec_and_test(&clnt->cl_count))
		return rpc_free_client(clnt);
}
/*
 * Release reference to the RPC client
 */
void
rpc_release_client(struct rpc_clnt *clnt)
{
	dprintk("RPC: rpc_release_client(%p)\n", clnt);

	do {
		if (list_empty(&clnt->cl_tasks))
			wake_up(&destroy_wait);
		if (!atomic_dec_and_test(&clnt->cl_count))
		clnt = rpc_free_auth(clnt);
	} while (clnt != NULL);
}
EXPORT_SYMBOL_GPL(rpc_release_client);
/**
 * rpc_bind_new_program - bind a new RPC program to an existing client
 * @old: old rpc_client
 * @program: rpc program to set
 * @vers: rpc program version
 *
 * Clones the rpc client and sets up a new RPC program. This is mainly
 * of use for enabling different RPC programs to share the same transport.
 * The Sun NFSv2/v3 ACL protocol can do this.
 */
struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old,
				      const struct rpc_program *program,
				      u32 vers)
{
	struct rpc_create_args args = {
		.prognumber	= program->number,
		.authflavor	= old->cl_auth->au_flavor,
		.cred		= old->cl_cred,
	};
	struct rpc_clnt *clnt;

	clnt = __rpc_clone_client(&args, old);
	err = rpc_ping(clnt);
		rpc_shutdown_client(clnt);
}
EXPORT_SYMBOL_GPL(rpc_bind_new_program);
static struct rpc_xprt *
rpc_task_get_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt)
{
	struct rpc_xprt_switch *xps;

	xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch);
	atomic_long_inc(&xps->xps_queuelen);
	atomic_long_inc(&xprt->queuelen);
}

static void
rpc_task_release_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt)
{
	struct rpc_xprt_switch *xps;

	atomic_long_dec(&xprt->queuelen);
	xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch);
	atomic_long_dec(&xps->xps_queuelen);
}
void rpc_task_release_transport(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	task->tk_xprt = NULL;
	if (task->tk_client)
		rpc_task_release_xprt(task->tk_client, xprt);
}
EXPORT_SYMBOL_GPL(rpc_task_release_transport);

void rpc_task_release_client(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;

	rpc_task_release_transport(task);

	/* Remove from client task list */
	spin_lock(&clnt->cl_lock);
	list_del(&task->tk_task);
	spin_unlock(&clnt->cl_lock);
	task->tk_client = NULL;

	rpc_release_client(clnt);
}
static struct rpc_xprt *
rpc_task_get_first_xprt(struct rpc_clnt *clnt)
{
	struct rpc_xprt *xprt;

	xprt = xprt_get(rcu_dereference(clnt->cl_xprt));
	return rpc_task_get_xprt(clnt, xprt);
}

static struct rpc_xprt *
rpc_task_get_next_xprt(struct rpc_clnt *clnt)
{
	return rpc_task_get_xprt(clnt, xprt_iter_get_next(&clnt->cl_xpi));
}
void rpc_task_set_transport(struct rpc_task *task, struct rpc_clnt *clnt)
{
	if (task->tk_flags & RPC_TASK_NO_ROUND_ROBIN)
		task->tk_xprt = rpc_task_get_first_xprt(clnt);
	else
		task->tk_xprt = rpc_task_get_next_xprt(clnt);
}
void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt)
{
	rpc_task_set_transport(task, clnt);
	task->tk_client = clnt;
	atomic_inc(&clnt->cl_count);
	if (clnt->cl_softrtry)
		task->tk_flags |= RPC_TASK_SOFT;
	if (clnt->cl_softerr)
		task->tk_flags |= RPC_TASK_TIMEOUT;
	if (clnt->cl_noretranstimeo)
		task->tk_flags |= RPC_TASK_NO_RETRANS_TIMEOUT;
	if (atomic_read(&clnt->cl_swapper))
		task->tk_flags |= RPC_TASK_SWAPPER;
	/* Add to the client's list of all tasks */
	spin_lock(&clnt->cl_lock);
	list_add_tail(&task->tk_task, &clnt->cl_tasks);
	spin_unlock(&clnt->cl_lock);
}
static void
rpc_task_set_rpc_message(struct rpc_task *task, const struct rpc_message *msg)
{
	task->tk_msg.rpc_proc = msg->rpc_proc;
	task->tk_msg.rpc_argp = msg->rpc_argp;
	task->tk_msg.rpc_resp = msg->rpc_resp;
	task->tk_msg.rpc_cred = msg->rpc_cred;
	if (!(task->tk_flags & RPC_TASK_CRED_NOREF))
		get_cred(task->tk_msg.rpc_cred);
}
/*
 * Default callback for async RPC calls
 */
static void
rpc_default_callback(struct rpc_task *task, void *data)
{
}

static const struct rpc_call_ops rpc_default_ops = {
	.rpc_call_done = rpc_default_callback,
};

/**
 * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it
 * @task_setup_data: pointer to task initialisation data
 */
struct rpc_task *rpc_run_task(const struct rpc_task_setup *task_setup_data)
{
	struct rpc_task *task;

	task = rpc_new_task(task_setup_data);

	if (!RPC_IS_ASYNC(task))
		task->tk_flags |= RPC_TASK_CRED_NOREF;

	rpc_task_set_client(task, task_setup_data->rpc_client);
	rpc_task_set_rpc_message(task, task_setup_data->rpc_message);

	if (task->tk_action == NULL)
		rpc_call_start(task);

	atomic_inc(&task->tk_count);
}
EXPORT_SYMBOL_GPL(rpc_run_task);
/**
 * rpc_call_sync - Perform a synchronous RPC call
 * @clnt: pointer to RPC client
 * @msg: RPC call parameters
 * @flags: RPC call flags
 */
int rpc_call_sync(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags)
{
	struct rpc_task *task;
	struct rpc_task_setup task_setup_data = {
		.callback_ops = &rpc_default_ops,
	};

	WARN_ON_ONCE(flags & RPC_TASK_ASYNC);
	if (flags & RPC_TASK_ASYNC) {
		rpc_release_calldata(task_setup_data.callback_ops,
				task_setup_data.callback_data);

	task = rpc_run_task(&task_setup_data);
		return PTR_ERR(task);
	status = task->tk_status;
}
EXPORT_SYMBOL_GPL(rpc_call_sync);
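
/*
 * Usage sketch (illustrative): a synchronous call built from an rpc_message.
 * The rpc_proc entry, argument and result pointers depend entirely on the
 * upper-layer protocol; the names below are placeholders.
 *
 *	struct rpc_message msg = {
 *		.rpc_proc = &my_procinfo,
 *		.rpc_argp = &my_args,
 *		.rpc_resp = &my_res,
 *	};
 *	int status = rpc_call_sync(clnt, &msg, RPC_TASK_SOFT);
 */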
/**
 * rpc_call_async - Perform an asynchronous RPC call
 * @clnt: pointer to RPC client
 * @msg: RPC call parameters
 * @flags: RPC call flags
 * @tk_ops: RPC call ops
 * @data: user call data
 */
int
rpc_call_async(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags,
	       const struct rpc_call_ops *tk_ops, void *data)
{
	struct rpc_task *task;
	struct rpc_task_setup task_setup_data = {
		.callback_ops = tk_ops,
		.callback_data = data,
		.flags = flags|RPC_TASK_ASYNC,
	};

	task = rpc_run_task(&task_setup_data);
		return PTR_ERR(task);
}
EXPORT_SYMBOL_GPL(rpc_call_async);
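
/*
 * Usage sketch (illustrative): an asynchronous call completes via the
 * rpc_call_done callback in the rpc_call_ops the caller supplies; the ops
 * structure and callback below are placeholders, not part of this file.
 *
 *	static void my_call_done(struct rpc_task *task, void *calldata)
 *	{
 *		if (task->tk_status != 0)
 *			pr_debug("call failed: %d\n", task->tk_status);
 *	}
 *
 *	static const struct rpc_call_ops my_call_ops = {
 *		.rpc_call_done = my_call_done,
 *	};
 *
 *	rpc_call_async(clnt, &msg, RPC_TASK_SOFT, &my_call_ops, NULL);
 */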
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
static void call_bc_encode(struct rpc_task *task);

/*
 * rpc_run_bc_task - Allocate a new RPC task for backchannel use, then run
 * rpc_execute against it
 */
struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req)
{
	struct rpc_task *task;
	struct rpc_task_setup task_setup_data = {
		.callback_ops = &rpc_default_ops,
		.flags = RPC_TASK_SOFTCONN |
			RPC_TASK_NO_RETRANS_TIMEOUT,
	};

	dprintk("RPC: rpc_run_bc_task req= %p\n", req);
	/*
	 * Create an rpc_task to send the data
	 */
	task = rpc_new_task(&task_setup_data);
	xprt_init_bc_request(req, task);

	task->tk_action = call_bc_encode;
	atomic_inc(&task->tk_count);
	WARN_ON_ONCE(atomic_read(&task->tk_count) != 2);

	dprintk("RPC: rpc_run_bc_task: task= %p\n", task);
}
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
/**
 * rpc_prepare_reply_pages - Prepare to receive a reply data payload into pages
 * @req: RPC request to prepare
 * @pages: vector of struct page pointers
 * @base: offset in first page where receive should start, in bytes
 * @len: expected size of the upper layer data payload, in bytes
 * @hdrsize: expected size of upper layer reply header, in XDR words
 *
 */
void rpc_prepare_reply_pages(struct rpc_rqst *req, struct page **pages,
			     unsigned int base, unsigned int len,
			     unsigned int hdrsize)
{
	/* Subtract one to force an extra word of buffer space for the
	 * payload's XDR pad to fall into the rcv_buf's tail iovec.
	 */
	hdrsize += RPC_REPHDRSIZE + req->rq_cred->cr_auth->au_ralign - 1;

	xdr_inline_pages(&req->rq_rcv_buf, hdrsize << 2, pages, base, len);
	trace_rpc_reply_pages(req);
}
EXPORT_SYMBOL_GPL(rpc_prepare_reply_pages);

void
rpc_call_start(struct rpc_task *task)
{
	task->tk_action = call_start;
}
EXPORT_SYMBOL_GPL(rpc_call_start);
/**
 * rpc_peeraddr - extract remote peer address from clnt's xprt
 * @clnt: RPC client structure
 * @buf: target buffer
 * @bufsize: length of target buffer
 *
 * Returns the number of bytes that are actually in the stored address.
 */
size_t rpc_peeraddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t bufsize)
{
	struct rpc_xprt *xprt;

	xprt = rcu_dereference(clnt->cl_xprt);

	bytes = xprt->addrlen;
	if (bytes > bufsize)
	memcpy(buf, &xprt->addr, bytes);
}
EXPORT_SYMBOL_GPL(rpc_peeraddr);
/**
 * rpc_peeraddr2str - return remote peer address in printable format
 * @clnt: RPC client structure
 * @format: address format
 *
 * NB: the lifetime of the memory referenced by the returned pointer is
 * the same as the rpc_xprt itself.  As long as the caller uses this
 * pointer, it must hold the RCU read lock.
 */
const char *rpc_peeraddr2str(struct rpc_clnt *clnt,
			     enum rpc_display_format_t format)
{
	struct rpc_xprt *xprt;

	xprt = rcu_dereference(clnt->cl_xprt);

	if (xprt->address_strings[format] != NULL)
		return xprt->address_strings[format];
	return "unprintable";
}
EXPORT_SYMBOL_GPL(rpc_peeraddr2str);
static const struct sockaddr_in rpc_inaddr_loopback = {
	.sin_family		= AF_INET,
	.sin_addr.s_addr	= htonl(INADDR_ANY),
};

static const struct sockaddr_in6 rpc_in6addr_loopback = {
	.sin6_family		= AF_INET6,
	.sin6_addr		= IN6ADDR_ANY_INIT,
};

/*
 * Try a getsockname() on a connected datagram socket.  Using a
 * connected datagram socket prevents leaving a socket in TIME_WAIT.
 * This conserves the ephemeral port number space.
 *
 * Returns zero and fills in "buf" if successful; otherwise, a
 * negative errno is returned.
 */
static int rpc_sockname(struct net *net, struct sockaddr *sap, size_t salen,
			struct sockaddr *buf)
{
	struct socket *sock;

	err = __sock_create(net, sap->sa_family,
			SOCK_DGRAM, IPPROTO_UDP, &sock, 1);
		dprintk("RPC: can't create UDP socket (%d)\n", err);

	switch (sap->sa_family) {
		err = kernel_bind(sock,
				(struct sockaddr *)&rpc_inaddr_loopback,
				sizeof(rpc_inaddr_loopback));
		err = kernel_bind(sock,
				(struct sockaddr *)&rpc_in6addr_loopback,
				sizeof(rpc_in6addr_loopback));
		err = -EAFNOSUPPORT;
	}
		dprintk("RPC: can't bind UDP socket (%d)\n", err);

	err = kernel_connect(sock, sap, salen, 0);
		dprintk("RPC: can't connect UDP socket (%d)\n", err);

	err = kernel_getsockname(sock, buf);
		dprintk("RPC: getsockname failed (%d)\n", err);

	if (buf->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)buf;
		sin6->sin6_scope_id = 0;
	}
	dprintk("RPC: %s succeeded\n", __func__);
}
/*
 * Scraping a connected socket failed, so we don't have a useable
 * local address.  Fallback: generate an address that will prevent
 * the server from calling us back.
 *
 * Returns zero and fills in "buf" if successful; otherwise, a
 * negative errno is returned.
 */
static int rpc_anyaddr(int family, struct sockaddr *buf, size_t buflen)
{
	if (buflen < sizeof(rpc_inaddr_loopback))
	memcpy(buf, &rpc_inaddr_loopback,
			sizeof(rpc_inaddr_loopback));
	if (buflen < sizeof(rpc_in6addr_loopback))
	memcpy(buf, &rpc_in6addr_loopback,
			sizeof(rpc_in6addr_loopback));
	dprintk("RPC: %s: address family not supported\n",
	return -EAFNOSUPPORT;
	dprintk("RPC: %s: succeeded\n", __func__);
}
/**
 * rpc_localaddr - discover local endpoint address for an RPC client
 * @clnt: RPC client structure
 * @buf: target buffer
 * @buflen: size of target buffer, in bytes
 *
 * Returns zero and fills in "buf" and "buflen" if successful;
 * otherwise, a negative errno is returned.
 *
 * This works even if the underlying transport is not currently connected,
 * or if the upper layer never previously provided a source address.
 *
 * The result of this function call is transient: multiple calls in
 * succession may give different results, depending on how local
 * networking configuration changes over time.
 */
int rpc_localaddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t buflen)
{
	struct sockaddr_storage address;
	struct sockaddr *sap = (struct sockaddr *)&address;
	struct rpc_xprt *xprt;

	xprt = rcu_dereference(clnt->cl_xprt);
	salen = xprt->addrlen;
	memcpy(sap, &xprt->addr, salen);
	net = get_net(xprt->xprt_net);

	rpc_set_port(sap, 0);
	err = rpc_sockname(net, sap, salen, buf);
	/* Couldn't discover local address, return ANYADDR */
	return rpc_anyaddr(sap->sa_family, buf, buflen);
}
EXPORT_SYMBOL_GPL(rpc_localaddr);
void
rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize)
{
	struct rpc_xprt *xprt;

	xprt = rcu_dereference(clnt->cl_xprt);
	if (xprt->ops->set_buffer_size)
		xprt->ops->set_buffer_size(xprt, sndsize, rcvsize);
}
EXPORT_SYMBOL_GPL(rpc_setbufsize);
/**
 * rpc_net_ns - Get the network namespace for this RPC client
 * @clnt: RPC client to query
 *
 */
struct net *rpc_net_ns(struct rpc_clnt *clnt)
{
	ret = rcu_dereference(clnt->cl_xprt)->xprt_net;
}
EXPORT_SYMBOL_GPL(rpc_net_ns);

/**
 * rpc_max_payload - Get maximum payload size for a transport, in bytes
 * @clnt: RPC client to query
 *
 * For stream transports, this is one RPC record fragment (see RFC
 * 1831), as we don't support multi-record requests yet.  For datagram
 * transports, this is the size of an IP packet minus the IP, UDP, and
 * RPC header sizes.
 */
size_t rpc_max_payload(struct rpc_clnt *clnt)
{
	ret = rcu_dereference(clnt->cl_xprt)->max_payload;
}
EXPORT_SYMBOL_GPL(rpc_max_payload);

/**
 * rpc_max_bc_payload - Get maximum backchannel payload size, in bytes
 * @clnt: RPC client to query
 */
size_t rpc_max_bc_payload(struct rpc_clnt *clnt)
{
	struct rpc_xprt *xprt;

	xprt = rcu_dereference(clnt->cl_xprt);
	ret = xprt->ops->bc_maxpayload(xprt);
}
EXPORT_SYMBOL_GPL(rpc_max_bc_payload);

unsigned int rpc_num_bc_slots(struct rpc_clnt *clnt)
{
	struct rpc_xprt *xprt;

	xprt = rcu_dereference(clnt->cl_xprt);
	ret = xprt->ops->bc_num_slots(xprt);
}
EXPORT_SYMBOL_GPL(rpc_num_bc_slots);
/**
 * rpc_force_rebind - force transport to check that remote port is unchanged
 * @clnt: client to rebind
 *
 */
void rpc_force_rebind(struct rpc_clnt *clnt)
{
	if (clnt->cl_autobind) {
		xprt_clear_bound(rcu_dereference(clnt->cl_xprt));
	}
}
EXPORT_SYMBOL_GPL(rpc_force_rebind);

static int
__rpc_restart_call(struct rpc_task *task, void (*action)(struct rpc_task *))
{
	task->tk_status = 0;
	task->tk_rpc_status = 0;
	task->tk_action = action;
}
/*
 * Restart an (async) RPC call. Usually called from within the
 * exit handler.
 */
int
rpc_restart_call(struct rpc_task *task)
{
	return __rpc_restart_call(task, call_start);
}
EXPORT_SYMBOL_GPL(rpc_restart_call);

/*
 * Restart an (async) RPC call from the call_prepare state.
 * Usually called from within the exit handler.
 */
int
rpc_restart_call_prepare(struct rpc_task *task)
{
	if (task->tk_ops->rpc_call_prepare != NULL)
		return __rpc_restart_call(task, rpc_prepare_task);
	return rpc_restart_call(task);
}
EXPORT_SYMBOL_GPL(rpc_restart_call_prepare);

const char
*rpc_proc_name(const struct rpc_task *task)
{
	const struct rpc_procinfo *proc = task->tk_msg.rpc_proc;

	return proc->p_name;
}
static void
__rpc_call_rpcerror(struct rpc_task *task, int tk_status, int rpc_status)
{
	task->tk_rpc_status = rpc_status;
	rpc_exit(task, tk_status);
}

static void
rpc_call_rpcerror(struct rpc_task *task, int status)
{
	__rpc_call_rpcerror(task, status, status);
}
/*
 * Other FSM states can be visited zero or more times, but
 * this state is visited exactly once for each RPC.
 */
static void
call_start(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;
	int idx = task->tk_msg.rpc_proc->p_statidx;

	trace_rpc_request(task);
	dprintk("RPC: %5u call_start %s%d proc %s (%s)\n", task->tk_pid,
		clnt->cl_program->name, clnt->cl_vers,
		rpc_proc_name(task),
		(RPC_IS_ASYNC(task) ? "async" : "sync"));

	/* Increment call count (version might not be valid for ping) */
	if (clnt->cl_program->version[clnt->cl_vers])
		clnt->cl_program->version[clnt->cl_vers]->counts[idx]++;
	clnt->cl_stats->rpccnt++;
	task->tk_action = call_reserve;
	rpc_task_set_transport(task, clnt);
}
/*
 * 1.	Reserve an RPC call slot
 */
static void
call_reserve(struct rpc_task *task)
{
	dprint_status(task);

	task->tk_status = 0;
	task->tk_action = call_reserveresult;
}

static void call_retry_reserve(struct rpc_task *task);
/*
 * 1b.	Grok the result of xprt_reserve()
 */
static void
call_reserveresult(struct rpc_task *task)
{
	int status = task->tk_status;

	dprint_status(task);

	/*
	 * After a call to xprt_reserve(), we must have either
	 * a request slot or else an error status.
	 */
	task->tk_status = 0;
	if (task->tk_rqstp) {
		task->tk_action = call_refresh;
	}

	rpc_call_rpcerror(task, -EIO);

	/*
	 * Even though there was an error, we may have acquired
	 * a request slot somehow.  Make sure not to leak it.
	 */
		rpc_delay(task, HZ >> 2);
	case -EAGAIN:	/* woken up; retry */
		task->tk_action = call_retry_reserve;
		rpc_call_rpcerror(task, status);
}
/*
 * 1c.	Retry reserving an RPC call slot
 */
static void
call_retry_reserve(struct rpc_task *task)
{
	dprint_status(task);

	task->tk_status = 0;
	task->tk_action = call_reserveresult;
	xprt_retry_reserve(task);
}

/*
 * 2.	Bind and/or refresh the credentials
 */
static void
call_refresh(struct rpc_task *task)
{
	dprint_status(task);

	task->tk_action = call_refreshresult;
	task->tk_status = 0;
	task->tk_client->cl_stats->rpcauthrefresh++;
	rpcauth_refreshcred(task);
}
/*
 * 2a.	Process the results of a credential refresh
 */
static void
call_refreshresult(struct rpc_task *task)
{
	int status = task->tk_status;

	dprint_status(task);

	task->tk_status = 0;
	task->tk_action = call_refresh;
	if (rpcauth_uptodatecred(task)) {
		task->tk_action = call_allocate;
	}
	/* Use rate-limiting and a max number of retries if refresh
	 * had status 0 but failed to update the cred.
	 */
	rpc_delay(task, 3*HZ);
	if (!task->tk_cred_retry)
	task->tk_cred_retry--;
	dprintk("RPC: %5u %s: retry refresh creds\n",
		task->tk_pid, __func__);
	dprintk("RPC: %5u %s: refresh creds failed with error %d\n",
		task->tk_pid, __func__, status);
	rpc_call_rpcerror(task, status);
}
/*
 * 2b.	Allocate the buffer. For details, see sched.c:rpc_malloc.
 *	(Note: buffer memory is freed in xprt_release).
 */
static void
call_allocate(struct rpc_task *task)
{
	const struct rpc_auth *auth = task->tk_rqstp->rq_cred->cr_auth;
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	const struct rpc_procinfo *proc = task->tk_msg.rpc_proc;

	dprint_status(task);

	task->tk_status = 0;
	task->tk_action = call_encode;

	if (proc->p_proc != 0) {
		BUG_ON(proc->p_arglen == 0);
		if (proc->p_decode != NULL)
			BUG_ON(proc->p_replen == 0);
	}

	/*
	 * Calculate the size (in quads) of the RPC call
	 * and reply headers, and convert both values
	 * to byte sizes.
	 */
	req->rq_callsize = RPC_CALLHDRSIZE + (auth->au_cslack << 1) +
	req->rq_callsize <<= 2;
	/*
	 * Note: the reply buffer must at minimum allocate enough space
	 * for the 'struct accepted_reply' from RFC5531.
	 */
	req->rq_rcvsize = RPC_REPHDRSIZE + auth->au_rslack + \
			max_t(size_t, proc->p_replen, 2);
	req->rq_rcvsize <<= 2;

	status = xprt->ops->buf_alloc(task);
	xprt_inject_disconnect(xprt);
	if (status != -ENOMEM) {
		rpc_call_rpcerror(task, status);
	}

	dprintk("RPC: %5u rpc_buffer allocation failed\n", task->tk_pid);

	if (RPC_IS_ASYNC(task) || !fatal_signal_pending(current)) {
		task->tk_action = call_allocate;
		rpc_delay(task, HZ>>4);
	}

	rpc_call_rpcerror(task, -ERESTARTSYS);
}
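
/*
 * Decide whether call_encode must (re-)build the request's send buffer
 * before it is queued for transmission.  Re-encoding on retransmission
 * lets RPCSEC_GSS use a correct sequence number (see call_encode below).
 */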
rpc_task_need_encode(struct rpc_task *task)
{
	return test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate) == 0 &&
		(!(task->tk_flags & RPC_TASK_SENT) ||
		 !(task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT) ||
		 xprt_request_need_retransmit(task));
}

static void
rpc_xdr_encode(struct rpc_task *task)
{
	struct rpc_rqst	*req = task->tk_rqstp;
	struct xdr_stream xdr;

	xdr_buf_init(&req->rq_snd_buf,
	xdr_buf_init(&req->rq_rcv_buf,

	req->rq_reply_bytes_recvd = 0;
	req->rq_snd_buf.head[0].iov_len = 0;
	xdr_init_encode(&xdr, &req->rq_snd_buf,
			req->rq_snd_buf.head[0].iov_base, req);
	xdr_free_bvec(&req->rq_snd_buf);
	if (rpc_encode_header(task, &xdr))

	task->tk_status = rpcauth_wrap_req(task, &xdr);
}
/*
 * 3.	Encode arguments of an RPC call
 */
static void
call_encode(struct rpc_task *task)
{
	if (!rpc_task_need_encode(task))

	dprint_status(task);
	/* Dequeue task from the receive queue while we're encoding */
	xprt_request_dequeue_xprt(task);
	/* Encode here so that rpcsec_gss can use correct sequence number. */
	rpc_xdr_encode(task);
	/* Did the encode result in an error condition? */
	if (task->tk_status != 0) {
		/* Was the error nonfatal? */
		switch (task->tk_status) {
			rpc_delay(task, HZ >> 4);
			if (!task->tk_cred_retry) {
				rpc_exit(task, task->tk_status);
			}
			task->tk_action = call_refresh;
			task->tk_cred_retry--;
			dprintk("RPC: %5u %s: retry refresh creds\n",
				task->tk_pid, __func__);
			rpc_call_rpcerror(task, task->tk_status);
		}
	}

	/* Add task to reply queue before transmission to avoid races */
	if (rpc_reply_expected(task))
		xprt_request_enqueue_receive(task);
	xprt_request_enqueue_transmit(task);

	task->tk_action = call_transmit;
	/* Check that the connection is OK */
	if (!xprt_bound(task->tk_xprt))
		task->tk_action = call_bind;
	else if (!xprt_connected(task->tk_xprt))
		task->tk_action = call_connect;
}
/*
 * Helpers to check if the task was already transmitted, and
 * to take action when that is the case.
 */
rpc_task_transmitted(struct rpc_task *task)
{
	return !test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
}

static void
rpc_task_handle_transmitted(struct rpc_task *task)
{
	xprt_end_transmit(task);
	task->tk_action = call_transmit_status;
}

/*
 * 4.	Get the server port number if not yet set
 */
static void
call_bind(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;

	if (rpc_task_transmitted(task)) {
		rpc_task_handle_transmitted(task);
	}

	if (xprt_bound(xprt)) {
		task->tk_action = call_connect;
	}

	dprint_status(task);

	task->tk_action = call_bind_status;
	if (!xprt_prepare_transmit(task))

	xprt->ops->rpcbind(task);
}
/*
 * 4a.	Sort out bind result
 */
static void
call_bind_status(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;

	if (rpc_task_transmitted(task)) {
		rpc_task_handle_transmitted(task);
	}

	dprint_status(task);
	trace_rpc_bind_status(task);
	if (task->tk_status >= 0)
	if (xprt_bound(xprt)) {
		task->tk_status = 0;
	}

	switch (task->tk_status) {
		dprintk("RPC: %5u rpcbind out of memory\n", task->tk_pid);
		rpc_delay(task, HZ >> 2);
		dprintk("RPC: %5u remote rpcbind: RPC program/version "
				"unavailable\n", task->tk_pid);
		/* fail immediately if this is an RPC ping */
		if (task->tk_msg.rpc_proc->p_proc == 0) {
			status = -EOPNOTSUPP;
		}
		if (task->tk_rebind_retry == 0)
		task->tk_rebind_retry--;
		rpc_delay(task, 3*HZ);
		rpc_delay(task, HZ >> 2);
		dprintk("RPC: %5u rpcbind request timed out\n",
		/* server doesn't support any rpcbind version we know of */
		dprintk("RPC: %5u unrecognized remote rpcbind service\n",
	case -EPROTONOSUPPORT:
		dprintk("RPC: %5u remote rpcbind version unavailable, retrying\n",
	case -ECONNREFUSED:		/* connection problems */
		dprintk("RPC: %5u remote rpcbind unreachable: %d\n",
				task->tk_pid, task->tk_status);
		if (!RPC_IS_SOFTCONN(task)) {
			rpc_delay(task, 5*HZ);
		}
		status = task->tk_status;
		dprintk("RPC: %5u unrecognized rpcbind error (%d)\n",
				task->tk_pid, -task->tk_status);
	}

	rpc_call_rpcerror(task, status);
	task->tk_action = call_connect;
	task->tk_status = 0;
	task->tk_action = call_bind;
	rpc_check_timeout(task);
}
/*
 * 4b.	Connect to the RPC server
 */
static void
call_connect(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;

	if (rpc_task_transmitted(task)) {
		rpc_task_handle_transmitted(task);
	}

	if (xprt_connected(xprt)) {
		task->tk_action = call_transmit;
	}

	dprintk("RPC: %5u call_connect xprt %p %s connected\n",
			(xprt_connected(xprt) ? "is" : "is not"));

	task->tk_action = call_connect_status;
	if (task->tk_status < 0)
	if (task->tk_flags & RPC_TASK_NOCONNECT) {
		rpc_call_rpcerror(task, -ENOTCONN);
	}
	if (!xprt_prepare_transmit(task))
}
/*
 * 4c.	Sort out connect result
 */
static void
call_connect_status(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
	struct rpc_clnt *clnt = task->tk_client;
	int status = task->tk_status;

	if (rpc_task_transmitted(task)) {
		rpc_task_handle_transmitted(task);
	}

	dprint_status(task);
	trace_rpc_connect_status(task);

	if (task->tk_status == 0) {
		clnt->cl_stats->netreconn++;
	}
	if (xprt_connected(xprt)) {
		task->tk_status = 0;
	}

	task->tk_status = 0;
	/* A positive refusal suggests a rebind is needed. */
	if (RPC_IS_SOFTCONN(task))
	if (clnt->cl_autobind) {
		rpc_force_rebind(clnt);
	}
	xprt_conditional_disconnect(task->tk_rqstp->rq_xprt,
				    task->tk_rqstp->rq_connect_cookie);
	if (RPC_IS_SOFTCONN(task))
	/* retry with existing socket, after a delay */
	rpc_delay(task, 3*HZ);
	rpc_delay(task, HZ >> 2);
	rpc_call_rpcerror(task, status);
	task->tk_action = call_transmit;
	/* Check for timeouts before looping back to call_bind */
	task->tk_action = call_bind;
	rpc_check_timeout(task);
}
/*
 * 5.	Transmit the RPC request, and wait for reply
 */
static void
call_transmit(struct rpc_task *task)
{
	if (rpc_task_transmitted(task)) {
		rpc_task_handle_transmitted(task);
	}

	dprint_status(task);

	task->tk_action = call_transmit_status;
	if (!xprt_prepare_transmit(task))
	task->tk_status = 0;
	if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate)) {
		if (!xprt_connected(task->tk_xprt)) {
			task->tk_status = -ENOTCONN;
		}
		xprt_transmit(task);
	}
	xprt_end_transmit(task);
}
/*
 * 5a.	Handle cleanup after a transmission
 */
static void
call_transmit_status(struct rpc_task *task)
{
	task->tk_action = call_status;

	/*
	 * Common case: success.  Force the compiler to put this
	 * test first.
	 */
	if (rpc_task_transmitted(task)) {
		task->tk_status = 0;
		xprt_request_wait_receive(task);
	}

	switch (task->tk_status) {
		dprint_status(task);
		task->tk_status = 0;
		task->tk_action = call_encode;
		/*
		 * Special cases: if we've been waiting on the
		 * socket's write_space() callback, or if the
		 * socket just returned a connection error,
		 * then hold onto the transport lock.
		 */
		rpc_delay(task, HZ>>2);
		task->tk_action = call_transmit;
		task->tk_status = 0;
		if (RPC_IS_SOFTCONN(task)) {
			if (!task->tk_msg.rpc_proc->p_proc)
				trace_xprt_ping(task->tk_xprt,
			rpc_call_rpcerror(task, task->tk_status);
		}
		task->tk_action = call_bind;
		task->tk_status = 0;
		rpc_check_timeout(task);
	}
}
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
static void call_bc_transmit(struct rpc_task *task);
static void call_bc_transmit_status(struct rpc_task *task);

static void
call_bc_encode(struct rpc_task *task)
{
	xprt_request_enqueue_transmit(task);
	task->tk_action = call_bc_transmit;
}

/*
 * 5b.	Send the backchannel RPC reply.  On error, drop the reply.  In
 *	addition, disconnect on connectivity errors.
 */
static void
call_bc_transmit(struct rpc_task *task)
{
	task->tk_action = call_bc_transmit_status;
	if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate)) {
		if (!xprt_prepare_transmit(task))
		task->tk_status = 0;
		xprt_transmit(task);
	}
	xprt_end_transmit(task);
}
static void
call_bc_transmit_status(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (rpc_task_transmitted(task))
		task->tk_status = 0;

	dprint_status(task);

	switch (task->tk_status) {
		rpc_delay(task, HZ>>2);
		task->tk_status = 0;
		task->tk_action = call_bc_transmit;
		/*
		 * Problem reaching the server.  Disconnect and let the
		 * forechannel reestablish the connection.  The server will
		 * have to retransmit the backchannel request and we'll
		 * reprocess it.  Since these ops are idempotent, there's no
		 * need to cache our reply at this time.
		 */
		printk(KERN_NOTICE "RPC: Could not send backchannel reply "
			"error: %d\n", task->tk_status);
		xprt_conditional_disconnect(req->rq_xprt,
			req->rq_connect_cookie);
		/*
		 * We were unable to reply and will have to drop the
		 * request.  The server should reconnect and retransmit.
		 */
		printk(KERN_NOTICE "RPC: Could not send backchannel reply "
			"error: %d\n", task->tk_status);
	task->tk_action = rpc_exit_task;
}
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
/*
 * 6.	Sort out the RPC call status
 */
static void
call_status(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;

	if (!task->tk_msg.rpc_proc->p_proc)
		trace_xprt_ping(task->tk_xprt, task->tk_status);

	dprint_status(task);

	status = task->tk_status;
	task->tk_action = call_decode;

	trace_rpc_call_status(task);
	task->tk_status = 0;
	if (RPC_IS_SOFTCONN(task))
	/*
	 * Delay any retries for 3 seconds, then handle as if it
	 * were a timeout.
	 */
	rpc_delay(task, 3*HZ);
	rpc_force_rebind(clnt);
	rpc_delay(task, 3*HZ);
	/* shutdown or soft timeout */
	if (clnt->cl_chatty)
		printk("%s: RPC call returned error %d\n",
		       clnt->cl_program->name, -status);
	task->tk_action = call_encode;
	rpc_check_timeout(task);
	rpc_call_rpcerror(task, status);
}
rpc_check_connected(const struct rpc_rqst *req)
{
	/* No allocated request or transport? return true */
	if (!req || !req->rq_xprt)
	return xprt_connected(req->rq_xprt);
}

static void
rpc_check_timeout(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;

	if (xprt_adjust_timeout(task->tk_rqstp) == 0)

	dprintk("RPC: %5u call_timeout (major)\n", task->tk_pid);
	task->tk_timeouts++;

	if (RPC_IS_SOFTCONN(task) && !rpc_check_connected(task->tk_rqstp)) {
		rpc_call_rpcerror(task, -ETIMEDOUT);
	}

	if (RPC_IS_SOFT(task)) {
		/*
		 * Once a "no retrans timeout" soft tasks (a.k.a NFSv4) has
		 * been sent, it should time out only if the transport
		 * connection gets terminally broken.
		 */
		if ((task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT) &&
		    rpc_check_connected(task->tk_rqstp))

		if (clnt->cl_chatty) {
			pr_notice_ratelimited(
				"%s: server %s not responding, timed out\n",
				clnt->cl_program->name,
				task->tk_xprt->servername);
		}
		if (task->tk_flags & RPC_TASK_TIMEOUT)
			rpc_call_rpcerror(task, -ETIMEDOUT);
		__rpc_call_rpcerror(task, -EIO, -ETIMEDOUT);
	}

	if (!(task->tk_flags & RPC_CALL_MAJORSEEN)) {
		task->tk_flags |= RPC_CALL_MAJORSEEN;
		if (clnt->cl_chatty) {
			pr_notice_ratelimited(
				"%s: server %s not responding, still trying\n",
				clnt->cl_program->name,
				task->tk_xprt->servername);
		}
	}
	rpc_force_rebind(clnt);
	/*
	 * Did our request time out due to an RPCSEC_GSS out-of-sequence
	 * event? RFC2203 requires the server to drop all such requests.
	 */
	rpcauth_invalcred(task);
}
/*
 * 7.	Decode the RPC reply
 */
static void
call_decode(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;
	struct rpc_rqst	*req = task->tk_rqstp;
	struct xdr_stream xdr;

	dprint_status(task);

	if (!task->tk_msg.rpc_proc->p_decode) {
		task->tk_action = rpc_exit_task;
	}

	if (task->tk_flags & RPC_CALL_MAJORSEEN) {
		if (clnt->cl_chatty) {
			pr_notice_ratelimited("%s: server %s OK\n",
				clnt->cl_program->name,
				task->tk_xprt->servername);
		}
		task->tk_flags &= ~RPC_CALL_MAJORSEEN;
	}

	/*
	 * Ensure that we see all writes made by xprt_complete_rqst()
	 * before it changed req->rq_reply_bytes_recvd.
	 */

	/*
	 * Did we ever call xprt_complete_rqst()? If not, we should assume
	 * the message is incomplete.
	 */
	if (!req->rq_reply_bytes_recvd)

	req->rq_rcv_buf.len = req->rq_private_buf.len;
	trace_xprt_recvfrom(&req->rq_rcv_buf);

	/* Check that the softirq receive buffer is valid */
	WARN_ON(memcmp(&req->rq_rcv_buf, &req->rq_private_buf,
				sizeof(req->rq_rcv_buf)) != 0);

	xdr_init_decode(&xdr, &req->rq_rcv_buf,
			req->rq_rcv_buf.head[0].iov_base, req);
	err = rpc_decode_header(task, &xdr);

	task->tk_action = rpc_exit_task;
	task->tk_status = rpcauth_unwrap_resp(task, &xdr);
	dprintk("RPC: %5u %s result %d\n",
		task->tk_pid, __func__, task->tk_status);

	task->tk_status = 0;
	if (task->tk_client->cl_discrtry)
		xprt_conditional_disconnect(req->rq_xprt,
					    req->rq_connect_cookie);
	task->tk_action = call_encode;
	rpc_check_timeout(task);

	task->tk_action = call_reserve;
	rpc_check_timeout(task);
	rpcauth_invalcred(task);
	/* Ensure we obtain a new XID if we retry! */
}
static int
rpc_encode_header(struct rpc_task *task, struct xdr_stream *xdr)
{
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rqst	*req = task->tk_rqstp;

	p = xdr_reserve_space(xdr, RPC_CALLHDRSIZE << 2);
	*p++ = cpu_to_be32(RPC_VERSION);
	*p++ = cpu_to_be32(clnt->cl_prog);
	*p++ = cpu_to_be32(clnt->cl_vers);
	*p   = cpu_to_be32(task->tk_msg.rpc_proc->p_proc);

	error = rpcauth_marshcred(task, xdr);

	trace_rpc_bad_callhdr(task);
	rpc_call_rpcerror(task, error);
}
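
/*
 * Parse the RPC reply header: check that this is a REPLY, verify the
 * verifier, and map MSG_ACCEPTED / MSG_DENIED status codes onto local
 * errno values or a retry via call_encode.
 */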
static int
rpc_decode_header(struct rpc_task *task, struct xdr_stream *xdr)
{
	struct rpc_clnt *clnt = task->tk_client;

	/* RFC-1014 says that the representation of XDR data must be a
	 * multiple of four bytes
	 * - if it isn't pointer subtraction in the NFS client may give
	 */
	if (task->tk_rqstp->rq_rcv_buf.len & 3)
		goto out_unparsable;

	p = xdr_inline_decode(xdr, 3 * sizeof(*p));
		goto out_unparsable;
	if (*p++ != rpc_reply)
		goto out_unparsable;
	if (*p++ != rpc_msg_accepted)
		goto out_msg_denied;

	error = rpcauth_checkverf(task, xdr);

	p = xdr_inline_decode(xdr, sizeof(*p));
		goto out_unparsable;
	case rpc_prog_unavail:
		trace_rpc__prog_unavail(task);
		error = -EPFNOSUPPORT;
	case rpc_prog_mismatch:
		trace_rpc__prog_mismatch(task);
		error = -EPROTONOSUPPORT;
	case rpc_proc_unavail:
		trace_rpc__proc_unavail(task);
		error = -EOPNOTSUPP;
	case rpc_garbage_args:
	case rpc_system_err:
		trace_rpc__garbage_args(task);
		goto out_unparsable;

	clnt->cl_stats->rpcgarbage++;
	if (task->tk_garb_retry) {
		task->tk_garb_retry--;
		task->tk_action = call_encode;
	}
	rpc_call_rpcerror(task, error);

	trace_rpc__unparsable(task);

	trace_rpc_bad_verifier(task);

	p = xdr_inline_decode(xdr, sizeof(*p));
		goto out_unparsable;
	case rpc_auth_error:
		trace_rpc__mismatch(task);
		error = -EPROTONOSUPPORT;
		goto out_unparsable;

	p = xdr_inline_decode(xdr, sizeof(*p));
		goto out_unparsable;
	case rpc_autherr_rejectedcred:
	case rpc_autherr_rejectedverf:
	case rpcsec_gsserr_credproblem:
	case rpcsec_gsserr_ctxproblem:
		if (!task->tk_cred_retry)
		task->tk_cred_retry--;
		trace_rpc__stale_creds(task);
		return -EKEYREJECTED;
	case rpc_autherr_badcred:
	case rpc_autherr_badverf:
		/* possibly garbled cred/verf? */
		if (!task->tk_garb_retry)
		task->tk_garb_retry--;
		trace_rpc__bad_creds(task);
		task->tk_action = call_encode;
	case rpc_autherr_tooweak:
		trace_rpc__auth_tooweak(task);
		pr_warn("RPC: server %s requires stronger authentication.\n",
			task->tk_xprt->servername);
		goto out_unparsable;
}
static void rpcproc_encode_null(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
		const void *obj)
{
}

static int rpcproc_decode_null(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
		void *obj)
{
	return 0;
}

static const struct rpc_procinfo rpcproc_null = {
	.p_encode = rpcproc_encode_null,
	.p_decode = rpcproc_decode_null,
};

static int rpc_ping(struct rpc_clnt *clnt)
{
	struct rpc_message msg = {
		.rpc_proc = &rpcproc_null,
	};
	int err;

	err = rpc_call_sync(clnt, &msg, RPC_TASK_SOFT | RPC_TASK_SOFTCONN |
			    RPC_TASK_NULLCREDS);
	return err;
}
struct rpc_task *rpc_call_null_helper(struct rpc_clnt *clnt,
		struct rpc_xprt *xprt, struct rpc_cred *cred, int flags,
		const struct rpc_call_ops *ops, void *data)
{
	struct rpc_message msg = {
		.rpc_proc = &rpcproc_null,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clnt,
		.rpc_xprt = xprt,
		.rpc_message = &msg,
		.rpc_op_cred = cred,
		.callback_ops = (ops != NULL) ? ops : &rpc_default_ops,
		.callback_data = data,
		.flags = flags | RPC_TASK_NULLCREDS,
	};

	return rpc_run_task(&task_setup_data);
}

struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred, int flags)
{
	return rpc_call_null_helper(clnt, NULL, cred, flags, NULL, NULL);
}
EXPORT_SYMBOL_GPL(rpc_call_null);
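
/*
 * Usage sketch (illustrative only, not part of this file's API): a caller
 * that wants to probe a server with an asynchronous NULL call could use
 * rpc_call_null() and then drop its task reference. The function name below
 * is hypothetical.
 *
 *	static int example_ping_async(struct rpc_clnt *clnt)
 *	{
 *		struct rpc_task *task;
 *
 *		task = rpc_call_null(clnt, NULL, RPC_TASK_SOFT | RPC_TASK_ASYNC);
 *		if (IS_ERR(task))
 *			return PTR_ERR(task);
 *		rpc_put_task(task);
 *		return 0;
 *	}
 */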
struct rpc_cb_add_xprt_calldata {
	struct rpc_xprt_switch *xps;
	struct rpc_xprt *xprt;
};

static void rpc_cb_add_xprt_done(struct rpc_task *task, void *calldata)
{
	struct rpc_cb_add_xprt_calldata *data = calldata;

	if (task->tk_status == 0)
		rpc_xprt_switch_add_xprt(data->xps, data->xprt);
}

static void rpc_cb_add_xprt_release(void *calldata)
{
	struct rpc_cb_add_xprt_calldata *data = calldata;

	xprt_put(data->xprt);
	xprt_switch_put(data->xps);
	kfree(data);
}

static const struct rpc_call_ops rpc_cb_add_xprt_call_ops = {
	.rpc_call_done = rpc_cb_add_xprt_done,
	.rpc_release = rpc_cb_add_xprt_release,
};
/**
 * rpc_clnt_test_and_add_xprt - Test and add a new transport to a rpc_clnt
 * @clnt: pointer to struct rpc_clnt
 * @xps: pointer to struct rpc_xprt_switch
 * @xprt: pointer to struct rpc_xprt
 * @dummy: unused
 */
int rpc_clnt_test_and_add_xprt(struct rpc_clnt *clnt,
		struct rpc_xprt_switch *xps, struct rpc_xprt *xprt,
		void *dummy)
{
	struct rpc_cb_add_xprt_calldata *data;
	struct rpc_task *task;

	data = kmalloc(sizeof(*data), GFP_NOFS);
	if (!data)
		return -ENOMEM;
	data->xps = xprt_switch_get(xps);
	data->xprt = xprt_get(xprt);
	if (rpc_xprt_switch_has_addr(data->xps, (struct sockaddr *)&xprt->addr)) {
		rpc_cb_add_xprt_release(data);
		goto success;
	}

	task = rpc_call_null_helper(clnt, xprt, NULL,
			RPC_TASK_SOFT|RPC_TASK_SOFTCONN|RPC_TASK_ASYNC|RPC_TASK_NULLCREDS,
			&rpc_cb_add_xprt_call_ops, data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
success:
	return 1;
}
EXPORT_SYMBOL_GPL(rpc_clnt_test_and_add_xprt);
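
/*
 * Usage sketch (illustrative only): this helper is intended to be passed as
 * the @setup callback of rpc_clnt_add_xprt(), so the transport described by a
 * caller-supplied struct xprt_create (here called xprtargs and filled in
 * elsewhere) is only added once the asynchronous NULL call it fires
 * completes successfully.
 *
 *	err = rpc_clnt_add_xprt(clnt, &xprtargs, rpc_clnt_test_and_add_xprt, NULL);
 */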
/**
 * rpc_clnt_setup_test_and_add_xprt()
 *
 * This is an rpc_clnt_add_xprt setup() function which returns 1 so:
 *   1) caller of the test function must dereference the rpc_xprt_switch
 *   and the rpc_xprt.
 *   2) test function must call rpc_xprt_switch_add_xprt, usually in
 *   the rpc_call_done routine.
 *
 * Upon success (return of 1), the test function adds the new
 * transport to the rpc_clnt xprt switch.
 *
 * @clnt: struct rpc_clnt to get the new transport
 * @xps:  the rpc_xprt_switch to hold the new transport
 * @xprt: the rpc_xprt to test
 * @data: a struct rpc_add_xprt_test pointer that holds the test function
 *        and test function call data
 */
int rpc_clnt_setup_test_and_add_xprt(struct rpc_clnt *clnt,
				     struct rpc_xprt_switch *xps,
				     struct rpc_xprt *xprt,
				     void *data)
{
	struct rpc_task *task;
	struct rpc_add_xprt_test *xtest = (struct rpc_add_xprt_test *)data;
	int status = -EADDRINUSE;

	xprt = xprt_get(xprt);
	xprt_switch_get(xps);

	if (rpc_xprt_switch_has_addr(xps, (struct sockaddr *)&xprt->addr))
		goto out_err;

	/* Test the connection */
	task = rpc_call_null_helper(clnt, xprt, NULL,
				    RPC_TASK_SOFT | RPC_TASK_SOFTCONN | RPC_TASK_NULLCREDS,
				    NULL, NULL);
	if (IS_ERR(task)) {
		status = PTR_ERR(task);
		goto out_err;
	}
	status = task->tk_status;
	rpc_put_task(task);

	if (status < 0)
		goto out_err;

	/* rpc_xprt_switch and rpc_xprt are dereferenced by add_xprt_test() */
	xtest->add_xprt_test(clnt, xprt, xtest->data);

	xprt_put(xprt);
	xprt_switch_put(xps);

	/* so that rpc_clnt_add_xprt does not call rpc_xprt_switch_add_xprt */
	return 1;
out_err:
	xprt_put(xprt);
	xprt_switch_put(xps);
	pr_info("RPC: rpc_clnt_test_xprt failed: %d addr %s not added\n",
		status, xprt->address_strings[RPC_DISPLAY_ADDR]);
	return status;
}
EXPORT_SYMBOL_GPL(rpc_clnt_setup_test_and_add_xprt);
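
/*
 * Usage sketch (illustrative only): a caller supplies its own trunking test
 * in a struct rpc_add_xprt_test and passes this function as the @setup
 * argument of rpc_clnt_add_xprt(); the test callback (or the completion path
 * it triggers) is then responsible for adding the transport to the switch.
 * example_trunk_test and example_data are hypothetical names.
 *
 *	struct rpc_add_xprt_test xtest = {
 *		.add_xprt_test	= example_trunk_test,
 *		.data		= example_data,
 *	};
 *
 *	err = rpc_clnt_add_xprt(clnt, &xprtargs,
 *				rpc_clnt_setup_test_and_add_xprt, &xtest);
 */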
/**
 * rpc_clnt_add_xprt - Add a new transport to a rpc_clnt
 * @clnt: pointer to struct rpc_clnt
 * @xprtargs: pointer to struct xprt_create
 * @setup: callback to test and/or set up the connection
 * @data: pointer to setup function data
 *
 * Creates a new transport using the parameters set in args and
 * adds it to clnt.
 * If ping is set, then test that connectivity succeeds before
 * adding the new transport.
 */
int rpc_clnt_add_xprt(struct rpc_clnt *clnt,
		struct xprt_create *xprtargs,
		int (*setup)(struct rpc_clnt *,
			struct rpc_xprt_switch *,
			struct rpc_xprt *,
			void *),
		void *data)
{
	struct rpc_xprt_switch *xps;
	struct rpc_xprt *xprt;
	unsigned long connect_timeout;
	unsigned long reconnect_timeout;
	unsigned char resvport, reuseport;
	int ret = 0;

	rcu_read_lock();
	xps = xprt_switch_get(rcu_dereference(clnt->cl_xpi.xpi_xpswitch));
	xprt = xprt_iter_xprt(&clnt->cl_xpi);
	if (xps == NULL || xprt == NULL) {
		rcu_read_unlock();
		xprt_switch_put(xps);
		return -EAGAIN;
	}
	resvport = xprt->resvport;
	reuseport = xprt->reuseport;
	connect_timeout = xprt->connect_timeout;
	reconnect_timeout = xprt->max_reconnect_timeout;
	rcu_read_unlock();

	xprt = xprt_create_transport(xprtargs);
	if (IS_ERR(xprt)) {
		ret = PTR_ERR(xprt);
		goto out_put_switch;
	}
	xprt->resvport = resvport;
	xprt->reuseport = reuseport;
	if (xprt->ops->set_connect_timeout != NULL)
		xprt->ops->set_connect_timeout(xprt,
				connect_timeout,
				reconnect_timeout);

	rpc_xprt_switch_set_roundrobin(xps);
	if (setup) {
		ret = setup(clnt, xps, xprt, data);
		if (ret != 0)
			goto out_put_xprt;
	}
	rpc_xprt_switch_add_xprt(xps, xprt);
out_put_xprt:
	xprt_put(xprt);
out_put_switch:
	xprt_switch_put(xps);
	return ret;
}
EXPORT_SYMBOL_GPL(rpc_clnt_add_xprt);
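
/*
 * Usage sketch (illustrative only): @xprtargs describes the new connection.
 * A minimal TCP example, assuming the destination address was stored in a
 * local sockaddr_storage beforehand, might look like the following; with a
 * NULL @setup the transport is added without a connectivity test.
 *
 *	struct sockaddr_storage dst;		// filled in by the caller beforehand
 *	struct xprt_create xprtargs = {
 *		.ident		= XPRT_TRANSPORT_TCP,
 *		.net		= rpc_net_ns(clnt),
 *		.dstaddr	= (struct sockaddr *)&dst,
 *		.addrlen	= sizeof(dst),
 *	};
 *
 *	err = rpc_clnt_add_xprt(clnt, &xprtargs, NULL, NULL);
 */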
struct connect_timeout_data {
	unsigned long connect_timeout;
	unsigned long reconnect_timeout;
};

static int
rpc_xprt_set_connect_timeout(struct rpc_clnt *clnt,
		struct rpc_xprt *xprt,
		void *data)
{
	struct connect_timeout_data *timeo = data;

	if (xprt->ops->set_connect_timeout)
		xprt->ops->set_connect_timeout(xprt,
				timeo->connect_timeout,
				timeo->reconnect_timeout);
	return 0;
}

void
rpc_set_connect_timeout(struct rpc_clnt *clnt,
		unsigned long connect_timeout,
		unsigned long reconnect_timeout)
{
	struct connect_timeout_data timeout = {
		.connect_timeout = connect_timeout,
		.reconnect_timeout = reconnect_timeout,
	};
	rpc_clnt_iterate_for_each_xprt(clnt,
			rpc_xprt_set_connect_timeout,
			&timeout);
}
EXPORT_SYMBOL_GPL(rpc_set_connect_timeout);
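
/*
 * Usage sketch (illustrative only): both timeouts are expressed in jiffies,
 * so a caller that wants a 15 second connect timeout and a 90 second cap on
 * the reconnect backoff could do the following (the values are made up for
 * the example).
 *
 *	rpc_set_connect_timeout(clnt, 15 * HZ, 90 * HZ);
 */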
void rpc_clnt_xprt_switch_put(struct rpc_clnt *clnt)
{
	rcu_read_lock();
	xprt_switch_put(rcu_dereference(clnt->cl_xpi.xpi_xpswitch));
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_put);

void rpc_clnt_xprt_switch_add_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt)
{
	rcu_read_lock();
	rpc_xprt_switch_add_xprt(rcu_dereference(clnt->cl_xpi.xpi_xpswitch),
				 xprt);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_add_xprt);

bool rpc_clnt_xprt_switch_has_addr(struct rpc_clnt *clnt,
				   const struct sockaddr *sap)
{
	struct rpc_xprt_switch *xps;
	bool ret;

	rcu_read_lock();
	xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch);
	ret = rpc_xprt_switch_has_addr(xps, sap);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_has_addr);
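
/*
 * Usage sketch (illustrative only): before creating a transport for a newly
 * discovered server address, a caller can skip known duplicates. The dst
 * variable below is hypothetical.
 *
 *	if (rpc_clnt_xprt_switch_has_addr(clnt, (struct sockaddr *)&dst))
 *		return 0;	// a transport for this address already exists
 */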
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
static void rpc_show_header(void)
{
	printk(KERN_INFO "-pid- flgs status -client- --rqstp- "
		"-timeout ---ops--\n");
}

static void rpc_show_task(const struct rpc_clnt *clnt,
			  const struct rpc_task *task)
{
	const char *rpc_waitq = "none";

	if (RPC_IS_QUEUED(task))
		rpc_waitq = rpc_qname(task->tk_waitqueue);

	printk(KERN_INFO "%5u %04x %6d %8p %8p %8ld %8p %sv%u %s a:%ps q:%s\n",
		task->tk_pid, task->tk_flags, task->tk_status,
		clnt, task->tk_rqstp, rpc_task_timeout(task), task->tk_ops,
		clnt->cl_program->name, clnt->cl_vers, rpc_proc_name(task),
		task->tk_action, rpc_waitq);
}

void rpc_show_tasks(struct net *net)
{
	struct rpc_clnt *clnt;
	struct rpc_task *task;
	int header = 0;
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

	spin_lock(&sn->rpc_client_lock);
	list_for_each_entry(clnt, &sn->all_clients, cl_clients) {
		spin_lock(&clnt->cl_lock);
		list_for_each_entry(task, &clnt->cl_tasks, tk_task) {
			if (!header) {
				rpc_show_header();
				header++;
			}
			rpc_show_task(clnt, task);
		}
		spin_unlock(&clnt->cl_lock);
	}
	spin_unlock(&sn->rpc_client_lock);
}
#endif
#if IS_ENABLED(CONFIG_SUNRPC_SWAP)
static int
rpc_clnt_swap_activate_callback(struct rpc_clnt *clnt,
		struct rpc_xprt *xprt,
		void *dummy)
{
	return xprt_enable_swap(xprt);
}

int
rpc_clnt_swap_activate(struct rpc_clnt *clnt)
{
	if (atomic_inc_return(&clnt->cl_swapper) == 1)
		return rpc_clnt_iterate_for_each_xprt(clnt,
				rpc_clnt_swap_activate_callback, NULL);
	return 0;
}
EXPORT_SYMBOL_GPL(rpc_clnt_swap_activate);

static int
rpc_clnt_swap_deactivate_callback(struct rpc_clnt *clnt,
		struct rpc_xprt *xprt,
		void *dummy)
{
	xprt_disable_swap(xprt);
	return 0;
}

void
rpc_clnt_swap_deactivate(struct rpc_clnt *clnt)
{
	if (atomic_dec_if_positive(&clnt->cl_swapper) == 0)
		rpc_clnt_iterate_for_each_xprt(clnt,
				rpc_clnt_swap_deactivate_callback, NULL);
}
EXPORT_SYMBOL_GPL(rpc_clnt_swap_deactivate);
#endif /* CONFIG_SUNRPC_SWAP */