/*
 * linux/net/sunrpc/svc.c
 *
 * High-level RPC service routines
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 *
 * Multiple thread pools and NUMAisation
 * Copyright (c) 2006 Silicon Graphics, Inc.
 * by Greg Banks <gnb@melbourne.sgi.com>
 */
#include <linux/linkage.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/nsproxy.h>

#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/bc_xprt.h>

#define RPCDBG_FACILITY	RPCDBG_SVCDSP
static void svc_unregister(const struct svc_serv *serv, struct net *net);

#define svc_serv_is_pooled(serv)    ((serv)->sv_function)
/*
 * Mode for mapping cpus to pools.
 */
enum {
	SVC_POOL_AUTO = -1,	/* choose one of the others */
	SVC_POOL_GLOBAL,	/* no mapping, just a single global pool
				 * (legacy & UP mode) */
	SVC_POOL_PERCPU,	/* one pool per cpu */
	SVC_POOL_PERNODE	/* one pool per numa node */
};
#define SVC_POOL_DEFAULT	SVC_POOL_GLOBAL
/*
 * Structure for mapping cpus to pools and vice versa.
 * Setup once during sunrpc initialisation.
 */
static struct svc_pool_map {
	int count;			/* How many svc_servs use us */
	int mode;			/* Note: int not enum to avoid
					 * warnings about "enumeration value
					 * not handled in switch" */
	unsigned int npools;
	unsigned int *pool_to;		/* maps pool id to cpu or node */
	unsigned int *to_pool;		/* maps cpu or node to pool id */
} svc_pool_map = {
	.count = 0,
	.mode = SVC_POOL_DEFAULT
};
static DEFINE_MUTEX(svc_pool_map_mutex);/* protects svc_pool_map.count only */
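
/*
 * Worked example (illustrative only): in SVC_POOL_PERCPU mode on a
 * 4-cpu box with all cpus online, the arrays are the identity mapping,
 * to_pool[cpu] == cpu and pool_to[pidx] == pidx.  In SVC_POOL_PERNODE
 * mode on a 2-node, 8-cpu box, to_pool[] is indexed by node rather
 * than cpu, so to_pool[0] == 0 and to_pool[1] == 1, with pool_to[]
 * holding the reverse mapping.
 */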
static int
param_set_pool_mode(const char *val, struct kernel_param *kp)
{
	int *ip = (int *)kp->arg;
	struct svc_pool_map *m = &svc_pool_map;
	int err;

	mutex_lock(&svc_pool_map_mutex);

	err = -EBUSY;
	if (m->count)
		goto out;

	err = 0;
	if (!strncmp(val, "auto", 4))
		*ip = SVC_POOL_AUTO;
	else if (!strncmp(val, "global", 6))
		*ip = SVC_POOL_GLOBAL;
	else if (!strncmp(val, "percpu", 6))
		*ip = SVC_POOL_PERCPU;
	else if (!strncmp(val, "pernode", 7))
		*ip = SVC_POOL_PERNODE;
	else
		err = -EINVAL;

out:
	mutex_unlock(&svc_pool_map_mutex);
	return err;
}
static int
param_get_pool_mode(char *buf, struct kernel_param *kp)
{
	int *ip = (int *)kp->arg;

	switch (*ip)
	{
	case SVC_POOL_AUTO:
		return strlcpy(buf, "auto", 20);
	case SVC_POOL_GLOBAL:
		return strlcpy(buf, "global", 20);
	case SVC_POOL_PERCPU:
		return strlcpy(buf, "percpu", 20);
	case SVC_POOL_PERNODE:
		return strlcpy(buf, "pernode", 20);
	default:
		return sprintf(buf, "%d", *ip);
	}
}

module_param_call(pool_mode, param_set_pool_mode, param_get_pool_mode,
		 &svc_pool_map.mode, 0644);
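
/*
 * Usage sketch (not part of this file): the 0644 permission above
 * exposes the parameter via sysfs, so an administrator can pick a
 * mode at load time or at runtime, e.g.
 *
 *	# modprobe sunrpc pool_mode=pernode
 *	# echo percpu > /sys/module/sunrpc/parameters/pool_mode
 *
 * The runtime write fails with EBUSY while any pooled service is
 * still using the map (see param_set_pool_mode above).
 */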
/*
 * Detect best pool mapping mode heuristically,
 * according to the machine's topology.
 */
static int
svc_pool_map_choose_mode(void)
{
	unsigned int node;

	if (nr_online_nodes > 1) {
		/*
		 * Actually have multiple NUMA nodes,
		 * so split pools on NUMA node boundaries
		 */
		return SVC_POOL_PERNODE;
	}

	node = first_online_node;
	if (nr_cpus_node(node) > 2) {
		/*
		 * Non-trivial SMP, or CONFIG_NUMA on
		 * non-NUMA hardware, e.g. with a generic
		 * x86_64 kernel on Xeons.  In this case we
		 * want to divide the pools on cpu boundaries.
		 */
		return SVC_POOL_PERCPU;
	}

	/* default: one global pool */
	return SVC_POOL_GLOBAL;
}
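
/*
 * For example (following the heuristic above): a two-node NUMA machine
 * gets SVC_POOL_PERNODE, a single-node box with more than two cpus
 * gets SVC_POOL_PERCPU, and a UP or dual-cpu box keeps the single
 * global pool.
 */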
/*
 * Allocate the to_pool[] and pool_to[] arrays.
 * Returns 0 on success or an errno.
 */
static int
svc_pool_map_alloc_arrays(struct svc_pool_map *m, unsigned int maxpools)
{
	m->to_pool = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
	if (!m->to_pool)
		goto fail;
	m->pool_to = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
	if (!m->pool_to)
		goto fail_free;

	return 0;

fail_free:
	kfree(m->to_pool);
	m->to_pool = NULL;
fail:
	return -ENOMEM;
}
/*
 * Initialise the pool map for SVC_POOL_PERCPU mode.
 * Returns number of pools or <0 on error.
 */
static int
svc_pool_map_init_percpu(struct svc_pool_map *m)
{
	unsigned int maxpools = nr_cpu_ids;
	unsigned int pidx = 0;
	unsigned int cpu;
	int err;

	err = svc_pool_map_alloc_arrays(m, maxpools);
	if (err)
		return err;

	for_each_online_cpu(cpu) {
		BUG_ON(pidx > maxpools);
		m->to_pool[cpu] = pidx;
		m->pool_to[pidx] = cpu;
		pidx++;
	}
	/* cpus brought online later all get mapped to pool0, sorry */

	return pidx;
}
/*
 * Initialise the pool map for SVC_POOL_PERNODE mode.
 * Returns number of pools or <0 on error.
 */
static int
svc_pool_map_init_pernode(struct svc_pool_map *m)
{
	unsigned int maxpools = nr_node_ids;
	unsigned int pidx = 0;
	unsigned int node;
	int err;

	err = svc_pool_map_alloc_arrays(m, maxpools);
	if (err)
		return err;

	for_each_node_with_cpus(node) {
		/* some architectures (e.g. SN2) have cpuless nodes */
		BUG_ON(pidx > maxpools);
		m->to_pool[node] = pidx;
		m->pool_to[pidx] = node;
		pidx++;
	}
	/* nodes brought online later all get mapped to pool0, sorry */

	return pidx;
}
/*
 * Add a reference to the global map of cpus to pools (and
 * vice versa).  Initialise the map if we're the first user.
 * Returns the number of pools.
 */
static unsigned int
svc_pool_map_get(void)
{
	struct svc_pool_map *m = &svc_pool_map;
	int npools = -1;

	mutex_lock(&svc_pool_map_mutex);

	if (m->count++) {
		mutex_unlock(&svc_pool_map_mutex);
		return m->npools;
	}

	if (m->mode == SVC_POOL_AUTO)
		m->mode = svc_pool_map_choose_mode();

	switch (m->mode) {
	case SVC_POOL_PERCPU:
		npools = svc_pool_map_init_percpu(m);
		break;
	case SVC_POOL_PERNODE:
		npools = svc_pool_map_init_pernode(m);
		break;
	}

	if (npools < 0) {
		/* default, or memory allocation failure */
		npools = 1;
		m->mode = SVC_POOL_GLOBAL;
	}
	m->npools = npools;

	mutex_unlock(&svc_pool_map_mutex);
	return m->npools;
}
/*
 * Drop a reference to the global map of cpus to pools.
 * When the last reference is dropped, the map data is
 * freed; this allows the sysadmin to change the pool
 * mode using the pool_mode module option without
 * rebooting or re-loading sunrpc.ko.
 */
static void
svc_pool_map_put(void)
{
	struct svc_pool_map *m = &svc_pool_map;

	mutex_lock(&svc_pool_map_mutex);

	if (!--m->count) {
		kfree(m->to_pool);
		m->to_pool = NULL;
		kfree(m->pool_to);
		m->pool_to = NULL;
		m->npools = 0;
	}

	mutex_unlock(&svc_pool_map_mutex);
}
static int svc_pool_map_get_node(unsigned int pidx)
{
	const struct svc_pool_map *m = &svc_pool_map;

	if (m->count) {
		if (m->mode == SVC_POOL_PERCPU)
			return cpu_to_node(m->pool_to[pidx]);
		if (m->mode == SVC_POOL_PERNODE)
			return m->pool_to[pidx];
	}
	return NUMA_NO_NODE;
}
/*
 * Set the given thread's cpus_allowed mask so that it
 * will only run on cpus in the given pool.
 */
static inline void
svc_pool_map_set_cpumask(struct task_struct *task, unsigned int pidx)
{
	struct svc_pool_map *m = &svc_pool_map;
	unsigned int node = m->pool_to[pidx];

	/*
	 * The caller checks for sv_nrpools > 1, which
	 * implies that we've been initialized.
	 */
	BUG_ON(m->count == 0);

	switch (m->mode) {
	case SVC_POOL_PERCPU:
	{
		set_cpus_allowed_ptr(task, cpumask_of(node));
		break;
	}
	case SVC_POOL_PERNODE:
	{
		set_cpus_allowed_ptr(task, cpumask_of_node(node));
		break;
	}
	}
}
/*
 * Use the mapping mode to choose a pool for a given CPU.
 * Used when enqueueing an incoming RPC.  Always returns
 * a non-NULL pool pointer.
 */
struct svc_pool *
svc_pool_for_cpu(struct svc_serv *serv, int cpu)
{
	struct svc_pool_map *m = &svc_pool_map;
	unsigned int pidx = 0;

	/*
	 * An uninitialised map happens in a pure client when
	 * lockd is brought up, so silently treat it the
	 * same as SVC_POOL_GLOBAL.
	 */
	if (svc_serv_is_pooled(serv)) {
		switch (m->mode) {
		case SVC_POOL_PERCPU:
			pidx = m->to_pool[cpu];
			break;
		case SVC_POOL_PERNODE:
			pidx = m->to_pool[cpu_to_node(cpu)];
			break;
		}
	}
	return &serv->sv_pools[pidx % serv->sv_nrpools];
}
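
/*
 * A sketch of the typical caller, the transport enqueue path (the
 * real call site lives in svc_xprt.c):
 *
 *	cpu = get_cpu();
 *	pool = svc_pool_for_cpu(xprt->xpt_server, cpu);
 *	put_cpu();
 */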
int svc_rpcb_setup(struct svc_serv *serv, struct net *net)
{
	int err;

	err = rpcb_create_local(net);
	if (err)
		return err;

	/* Remove any stale portmap registrations */
	svc_unregister(serv, net);
	return 0;
}
EXPORT_SYMBOL_GPL(svc_rpcb_setup);

void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net)
{
	svc_unregister(serv, net);
	rpcb_put_local(net);
}
EXPORT_SYMBOL_GPL(svc_rpcb_cleanup);
static int svc_uses_rpcbind(struct svc_serv *serv)
{
	struct svc_program *progp;
	unsigned int i;

	for (progp = serv->sv_program; progp; progp = progp->pg_next) {
		for (i = 0; i < progp->pg_nvers; i++) {
			if (progp->pg_vers[i] == NULL)
				continue;
			if (progp->pg_vers[i]->vs_hidden == 0)
				return 1;
		}
	}

	return 0;
}

int svc_bind(struct svc_serv *serv, struct net *net)
{
	if (!svc_uses_rpcbind(serv))
		return 0;
	return svc_rpcb_setup(serv, net);
}
EXPORT_SYMBOL_GPL(svc_bind);
/*
 * Create an RPC service
 */
static struct svc_serv *
__svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
	     void (*shutdown)(struct svc_serv *serv, struct net *net))
{
	struct svc_serv	*serv;
	unsigned int vers;
	unsigned int xdrsize;
	unsigned int i;

	if (!(serv = kzalloc(sizeof(*serv), GFP_KERNEL)))
		return NULL;
	serv->sv_name      = prog->pg_name;
	serv->sv_program   = prog;
	serv->sv_nrthreads = 1;
	serv->sv_stats     = prog->pg_stats;
	if (bufsize > RPCSVC_MAXPAYLOAD)
		bufsize = RPCSVC_MAXPAYLOAD;
	serv->sv_max_payload = bufsize ? bufsize : 4096;
	serv->sv_max_mesg  = roundup(serv->sv_max_payload + PAGE_SIZE, PAGE_SIZE);
	serv->sv_shutdown  = shutdown;
	xdrsize = 0;
	while (prog) {
		prog->pg_lovers = prog->pg_nvers-1;
		for (vers = 0; vers < prog->pg_nvers; vers++)
			if (prog->pg_vers[vers]) {
				prog->pg_hivers = vers;
				if (prog->pg_lovers > vers)
					prog->pg_lovers = vers;
				if (prog->pg_vers[vers]->vs_xdrsize > xdrsize)
					xdrsize = prog->pg_vers[vers]->vs_xdrsize;
			}
		prog = prog->pg_next;
	}
	serv->sv_xdrsize   = xdrsize;
	INIT_LIST_HEAD(&serv->sv_tempsocks);
	INIT_LIST_HEAD(&serv->sv_permsocks);
	init_timer(&serv->sv_temptimer);
	spin_lock_init(&serv->sv_lock);

	serv->sv_nrpools = npools;
	serv->sv_pools =
		kcalloc(serv->sv_nrpools, sizeof(struct svc_pool),
			GFP_KERNEL);
	if (!serv->sv_pools) {
		kfree(serv);
		return NULL;
	}

	for (i = 0; i < serv->sv_nrpools; i++) {
		struct svc_pool *pool = &serv->sv_pools[i];

		dprintk("svc: initialising pool %u for %s\n",
				i, serv->sv_name);

		pool->sp_id = i;
		INIT_LIST_HEAD(&pool->sp_threads);
		INIT_LIST_HEAD(&pool->sp_sockets);
		INIT_LIST_HEAD(&pool->sp_all_threads);
		spin_lock_init(&pool->sp_lock);
	}

	if (svc_uses_rpcbind(serv) && (!serv->sv_shutdown))
		serv->sv_shutdown = svc_rpcb_cleanup;

	return serv;
}
struct svc_serv *
svc_create(struct svc_program *prog, unsigned int bufsize,
	   void (*shutdown)(struct svc_serv *serv, struct net *net))
{
	return __svc_create(prog, bufsize, /*npools*/1, shutdown);
}
EXPORT_SYMBOL_GPL(svc_create);
struct svc_serv *
svc_create_pooled(struct svc_program *prog, unsigned int bufsize,
		  void (*shutdown)(struct svc_serv *serv, struct net *net),
		  svc_thread_fn func, struct module *mod)
{
	struct svc_serv *serv;
	unsigned int npools = svc_pool_map_get();

	serv = __svc_create(prog, bufsize, npools, shutdown);
	if (!serv)
		goto out_err;

	serv->sv_function = func;
	serv->sv_module = mod;
	return serv;
out_err:
	svc_pool_map_put();
	return NULL;
}
EXPORT_SYMBOL_GPL(svc_create_pooled);
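
/*
 * A minimal usage sketch, assuming a hypothetical "examplefs" service
 * (nfsd and lockd are the real callers).  Passing a NULL shutdown
 * callback lets __svc_create() default it to svc_rpcb_cleanup for
 * rpcbind-visible programs:
 *
 *	serv = svc_create_pooled(&examplefs_program, 64 * 1024, NULL,
 *				 examplefs_thread_fn, THIS_MODULE);
 *	if (serv == NULL)
 *		return -ENOMEM;
 *	error = svc_bind(serv, net);
 */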
void svc_shutdown_net(struct svc_serv *serv, struct net *net)
{
	/*
	 * The set of xprts (contained in the sv_tempsocks and
	 * sv_permsocks lists) is now constant, since it is modified
	 * only by accepting new sockets (done by service threads in
	 * svc_recv) or aging old ones (done by sv_temptimer), or
	 * configuration changes (excluded by whatever locking the
	 * caller is using--nfsd_mutex in the case of nfsd).  So it's
	 * safe to traverse those lists and shut everything down:
	 */
	svc_close_net(serv, net);

	if (serv->sv_shutdown)
		serv->sv_shutdown(serv, net);
}
EXPORT_SYMBOL_GPL(svc_shutdown_net);
/*
 * Destroy an RPC service. Should be called with appropriate locking to
 * protect the sv_nrthreads, sv_permsocks and sv_tempsocks.
 */
void
svc_destroy(struct svc_serv *serv)
{
	dprintk("svc: svc_destroy(%s, %d)\n",
		serv->sv_program->pg_name,
		serv->sv_nrthreads);

	if (serv->sv_nrthreads) {
		if (--(serv->sv_nrthreads) != 0) {
			svc_sock_update_bufs(serv);
			return;
		}
	} else
		printk("svc_destroy: no threads for serv=%p!\n", serv);

	del_timer_sync(&serv->sv_temptimer);

	/*
	 * The last user is gone and thus all sockets have to be destroyed
	 * by this point. Check this.
	 */
	BUG_ON(!list_empty(&serv->sv_permsocks));
	BUG_ON(!list_empty(&serv->sv_tempsocks));

	cache_clean_deferred(serv);

	if (svc_serv_is_pooled(serv))
		svc_pool_map_put();

	kfree(serv->sv_pools);
	kfree(serv);
}
EXPORT_SYMBOL_GPL(svc_destroy);
/*
 * Allocate an RPC server's buffer space.
 * We allocate pages and place them in rq_argpages.
 */
static int
svc_init_buffer(struct svc_rqst *rqstp, unsigned int size, int node)
{
	unsigned int pages, arghi;

	/* bc_xprt uses fore channel allocated buffers */
	if (svc_is_backchannel(rqstp))
		return 1;

	pages = size / PAGE_SIZE + 1; /* extra page as we hold both request and reply.
				       * We assume one is at most one page
				       */
	arghi = 0;
	BUG_ON(pages > RPCSVC_MAXPAGES);
	while (pages) {
		struct page *p = alloc_pages_node(node, GFP_KERNEL, 0);
		if (!p)
			break;
		rqstp->rq_pages[arghi++] = p;
		pages--;
	}
	return pages == 0;
}
/*
 * Release an RPC server buffer
 */
static void
svc_release_buffer(struct svc_rqst *rqstp)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(rqstp->rq_pages); i++)
		if (rqstp->rq_pages[i])
			put_page(rqstp->rq_pages[i]);
}
struct svc_rqst *
svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
{
	struct svc_rqst	*rqstp;

	rqstp = kzalloc_node(sizeof(*rqstp), GFP_KERNEL, node);
	if (!rqstp)
		goto out_enomem;

	init_waitqueue_head(&rqstp->rq_wait);

	serv->sv_nrthreads++;
	spin_lock_bh(&pool->sp_lock);
	pool->sp_nrthreads++;
	list_add(&rqstp->rq_all, &pool->sp_all_threads);
	spin_unlock_bh(&pool->sp_lock);
	rqstp->rq_server = serv;
	rqstp->rq_pool = pool;

	rqstp->rq_argp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
	if (!rqstp->rq_argp)
		goto out_thread;

	rqstp->rq_resp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
	if (!rqstp->rq_resp)
		goto out_thread;

	if (!svc_init_buffer(rqstp, serv->sv_max_mesg, node))
		goto out_thread;

	return rqstp;
out_thread:
	svc_exit_thread(rqstp);
out_enomem:
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(svc_prepare_thread);
/*
 * Choose a pool in which to create a new thread, for svc_set_num_threads
 */
static inline struct svc_pool *
choose_pool(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
{
	if (pool != NULL)
		return pool;

	return &serv->sv_pools[(*state)++ % serv->sv_nrpools];
}
/*
 * Choose a thread to kill, for svc_set_num_threads
 */
static inline struct task_struct *
choose_victim(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
{
	unsigned int i;
	struct task_struct *task = NULL;

	if (pool != NULL) {
		spin_lock_bh(&pool->sp_lock);
	} else {
		/* choose a pool in round-robin fashion */
		for (i = 0; i < serv->sv_nrpools; i++) {
			pool = &serv->sv_pools[--(*state) % serv->sv_nrpools];
			spin_lock_bh(&pool->sp_lock);
			if (!list_empty(&pool->sp_all_threads))
				goto found_pool;
			spin_unlock_bh(&pool->sp_lock);
		}
		return NULL;
	}

found_pool:
	if (!list_empty(&pool->sp_all_threads)) {
		struct svc_rqst *rqstp;

		/*
		 * Remove from the pool->sp_all_threads list
		 * so we don't try to kill it again.
		 */
		rqstp = list_entry(pool->sp_all_threads.next, struct svc_rqst, rq_all);
		list_del_init(&rqstp->rq_all);
		task = rqstp->rq_task;
	}
	spin_unlock_bh(&pool->sp_lock);

	return task;
}
/*
 * Create or destroy enough new threads to make the number
 * of threads the given number.  If `pool' is non-NULL, applies
 * only to threads in that pool, otherwise round-robins between
 * all pools.  Caller must ensure mutual exclusion between this and
 * server startup or shutdown.
 *
 * Destroying threads relies on the service threads filling in
 * rqstp->rq_task, which only the nfs ones do.  Assumes the serv
 * has been created using svc_create_pooled().
 *
 * Based on code that used to be in nfsd_svc() but tweaked
 * to be pool-aware.
 */
int
svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
	struct svc_rqst	*rqstp;
	struct task_struct *task;
	struct svc_pool *chosen_pool;
	int error = 0;
	unsigned int state = serv->sv_nrthreads-1;
	int node;

	if (pool == NULL) {
		/* The -1 assumes caller has done a svc_get() */
		nrservs -= (serv->sv_nrthreads-1);
	} else {
		spin_lock_bh(&pool->sp_lock);
		nrservs -= pool->sp_nrthreads;
		spin_unlock_bh(&pool->sp_lock);
	}

	/* create new threads */
	while (nrservs > 0) {
		nrservs--;
		chosen_pool = choose_pool(serv, pool, &state);

		node = svc_pool_map_get_node(chosen_pool->sp_id);
		rqstp = svc_prepare_thread(serv, chosen_pool, node);
		if (IS_ERR(rqstp)) {
			error = PTR_ERR(rqstp);
			break;
		}

		__module_get(serv->sv_module);
		task = kthread_create_on_node(serv->sv_function, rqstp,
					      node, serv->sv_name);
		if (IS_ERR(task)) {
			error = PTR_ERR(task);
			module_put(serv->sv_module);
			svc_exit_thread(rqstp);
			break;
		}

		rqstp->rq_task = task;
		if (serv->sv_nrpools > 1)
			svc_pool_map_set_cpumask(task, chosen_pool->sp_id);

		svc_sock_update_bufs(serv);
		wake_up_process(task);
	}
	/* destroy old threads */
	while (nrservs < 0 &&
	       (task = choose_victim(serv, pool, &state)) != NULL) {
		send_sig(SIGINT, task, 1);
		nrservs++;
	}

	return error;
}
EXPORT_SYMBOL_GPL(svc_set_num_threads);
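
/*
 * Usage sketch, modelled on the nfsd-style callers: the caller holds
 * its own mutex, owns a reference to serv (the -1 above), and drops
 * that reference via svc_destroy() afterwards.  Names here are
 * hypothetical:
 *
 *	mutex_lock(&examplefs_mutex);
 *	svc_get(serv);
 *	error = svc_set_num_threads(serv, NULL, nthreads);
 *	svc_destroy(serv);
 *	mutex_unlock(&examplefs_mutex);
 */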
/*
 * Called from a server thread as it's exiting.  Caller must hold the BKL or
 * the "service mutex", whichever is appropriate for the service.
 */
void
svc_exit_thread(struct svc_rqst *rqstp)
{
	struct svc_serv	*serv = rqstp->rq_server;
	struct svc_pool	*pool = rqstp->rq_pool;

	svc_release_buffer(rqstp);
	kfree(rqstp->rq_resp);
	kfree(rqstp->rq_argp);
	kfree(rqstp->rq_auth_data);

	spin_lock_bh(&pool->sp_lock);
	pool->sp_nrthreads--;
	list_del(&rqstp->rq_all);
	spin_unlock_bh(&pool->sp_lock);

	kfree(rqstp);

	/* Release the server */
	if (serv)
		svc_destroy(serv);
}
EXPORT_SYMBOL_GPL(svc_exit_thread);
/*
 * Register an "inet" protocol family netid with the local
 * rpcbind daemon via an rpcbind v4 SET request.
 *
 * No netconfig infrastructure is available in the kernel, so
 * we map IP_ protocol numbers to netids by hand.
 *
 * Returns zero on success; a negative errno value is returned
 * if any error occurs.
 */
static int __svc_rpcb_register4(struct net *net, const u32 program,
				const u32 version,
				const unsigned short protocol,
				const unsigned short port)
{
	const struct sockaddr_in sin = {
		.sin_family		= AF_INET,
		.sin_addr.s_addr	= htonl(INADDR_ANY),
		.sin_port		= htons(port),
	};
	const char *netid;
	int error;

	switch (protocol) {
	case IPPROTO_UDP:
		netid = RPCBIND_NETID_UDP;
		break;
	case IPPROTO_TCP:
		netid = RPCBIND_NETID_TCP;
		break;
	default:
		return -ENOPROTOOPT;
	}

	error = rpcb_v4_register(net, program, version,
				 (const struct sockaddr *)&sin, netid);

	/*
	 * User space didn't support rpcbind v4, so retry this
	 * registration request with the legacy rpcbind v2 protocol.
	 */
	if (error == -EPROTONOSUPPORT)
		error = rpcb_register(net, program, version, protocol, port);

	return error;
}
#if IS_ENABLED(CONFIG_IPV6)
/*
 * Register an "inet6" protocol family netid with the local
 * rpcbind daemon via an rpcbind v4 SET request.
 *
 * No netconfig infrastructure is available in the kernel, so
 * we map IP_ protocol numbers to netids by hand.
 *
 * Returns zero on success; a negative errno value is returned
 * if any error occurs.
 */
static int __svc_rpcb_register6(struct net *net, const u32 program,
				const u32 version,
				const unsigned short protocol,
				const unsigned short port)
{
	const struct sockaddr_in6 sin6 = {
		.sin6_family		= AF_INET6,
		.sin6_addr		= IN6ADDR_ANY_INIT,
		.sin6_port		= htons(port),
	};
	const char *netid;
	int error;

	switch (protocol) {
	case IPPROTO_UDP:
		netid = RPCBIND_NETID_UDP6;
		break;
	case IPPROTO_TCP:
		netid = RPCBIND_NETID_TCP6;
		break;
	default:
		return -ENOPROTOOPT;
	}

	error = rpcb_v4_register(net, program, version,
				 (const struct sockaddr *)&sin6, netid);

	/*
	 * User space didn't support rpcbind version 4, so we won't
	 * use a PF_INET6 listener.
	 */
	if (error == -EPROTONOSUPPORT)
		error = -EAFNOSUPPORT;

	return error;
}
#endif	/* IS_ENABLED(CONFIG_IPV6) */
/*
 * Register a kernel RPC service via rpcbind version 4.
 *
 * Returns zero on success; a negative errno value is returned
 * if any error occurs.
 */
static int __svc_register(struct net *net, const char *progname,
			  const u32 program, const u32 version,
			  const int family,
			  const unsigned short protocol,
			  const unsigned short port)
{
	int error = -EAFNOSUPPORT;

	switch (family) {
	case PF_INET:
		error = __svc_rpcb_register4(net, program, version,
						protocol, port);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case PF_INET6:
		error = __svc_rpcb_register6(net, program, version,
						protocol, port);
#endif
	}

	if (error < 0)
		printk(KERN_WARNING "svc: failed to register %sv%u RPC "
			"service (errno %d).\n", progname, version, -error);
	return error;
}
/**
 * svc_register - register an RPC service with the local portmapper
 * @serv: svc_serv struct for the service to register
 * @net: net namespace for the service to register
 * @family: protocol family of service's listener socket
 * @proto: transport protocol number to advertise
 * @port: port to advertise
 *
 * Service is registered for any address in the passed-in protocol family
 */
int svc_register(const struct svc_serv *serv, struct net *net,
		 const int family, const unsigned short proto,
		 const unsigned short port)
{
	struct svc_program	*progp;
	unsigned int		i;
	int			error = 0;

	BUG_ON(proto == 0 && port == 0);

	for (progp = serv->sv_program; progp; progp = progp->pg_next) {
		for (i = 0; i < progp->pg_nvers; i++) {
			if (progp->pg_vers[i] == NULL)
				continue;

			dprintk("svc: svc_register(%sv%d, %s, %u, %u)%s\n",
					progp->pg_name,
					i,
					proto == IPPROTO_UDP ? "udp" : "tcp",
					port,
					family,
					progp->pg_vers[i]->vs_hidden ?
						" (but not telling portmap)" : "");

			if (progp->pg_vers[i]->vs_hidden)
				continue;

			error = __svc_register(net, progp->pg_name, progp->pg_prog,
						i, family, proto, port);
			if (error < 0)
				break;
		}
	}

	return error;
}
/*
 * If user space is running rpcbind, it should take the v4 UNSET
 * and clear everything for this [program, version].  If user space
 * is running portmap, it will reject the v4 UNSET, but won't have
 * any "inet6" entries anyway.  So a PMAP_UNSET should be sufficient
 * in this case to clear all existing entries for [program, version].
 */
static void __svc_unregister(struct net *net, const u32 program, const u32 version,
			     const char *progname)
{
	int error;

	error = rpcb_v4_register(net, program, version, NULL, "");

	/*
	 * User space didn't support rpcbind v4, so retry this
	 * request with the legacy rpcbind v2 protocol.
	 */
	if (error == -EPROTONOSUPPORT)
		error = rpcb_register(net, program, version, 0, 0);

	dprintk("svc: %s(%sv%u), error %d\n",
			__func__, progname, version, error);
}
/*
 * All netids, bind addresses and ports registered for [program, version]
 * are removed from the local rpcbind database (if the service is not
 * hidden) to make way for a new instance of the service.
 *
 * The result of unregistration is reported via dprintk for those who want
 * verification of the result, but is otherwise not important.
 */
static void svc_unregister(const struct svc_serv *serv, struct net *net)
{
	struct svc_program *progp;
	unsigned long flags;
	unsigned int i;

	clear_thread_flag(TIF_SIGPENDING);

	for (progp = serv->sv_program; progp; progp = progp->pg_next) {
		for (i = 0; i < progp->pg_nvers; i++) {
			if (progp->pg_vers[i] == NULL)
				continue;
			if (progp->pg_vers[i]->vs_hidden)
				continue;

			dprintk("svc: attempting to unregister %sv%u\n",
				progp->pg_name, i);
			__svc_unregister(net, progp->pg_prog, i, progp->pg_name);
		}
	}

	spin_lock_irqsave(&current->sighand->siglock, flags);
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
/*
 * Printk the given error with the address of the client that caused it.
 */
static __printf(2, 3)
int svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
{
	va_list args;
	int 	r;
	char 	buf[RPC_MAX_ADDRBUFLEN];

	if (!net_ratelimit())
		return 0;

	printk(KERN_WARNING "svc: %s: ",
		svc_print_addr(rqstp, buf, sizeof(buf)));

	va_start(args, fmt);
	r = vprintk(fmt, args);
	va_end(args);

	return r;
}
/*
 * Common routine for processing the RPC request.
 */
static int
svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
{
	struct svc_program	*progp;
	struct svc_version	*versp = NULL;	/* compiler food */
	struct svc_procedure	*procp = NULL;
	struct svc_serv		*serv = rqstp->rq_server;
	kxdrproc_t		xdr;
	__be32			*statp;
	u32			prog, vers, proc;
	__be32			auth_stat, rpc_stat;
	int			auth_res;
	__be32			*reply_statp;

	rpc_stat = rpc_success;

	if (argv->iov_len < 6*4)
		goto err_short_len;

	/* Will be turned off only in gss privacy case: */
	rqstp->rq_splice_ok = 1;
	/* Will be turned off only when NFSv4 Sessions are used */
	rqstp->rq_usedeferral = 1;
	rqstp->rq_dropme = false;

	/* Setup reply header */
	rqstp->rq_xprt->xpt_ops->xpo_prep_reply_hdr(rqstp);

	svc_putu32(resv, rqstp->rq_xid);

	vers = svc_getnl(argv);

	/* First words of reply: */
	svc_putnl(resv, 1);		/* REPLY */

	if (vers != 2)		/* RPC version number */
		goto err_bad_rpc;

	/* Save position in case we later decide to reject: */
	reply_statp = resv->iov_base + resv->iov_len;

	svc_putnl(resv, 0);		/* ACCEPT */

	rqstp->rq_prog = prog = svc_getnl(argv);	/* program number */
	rqstp->rq_vers = vers = svc_getnl(argv);	/* version number */
	rqstp->rq_proc = proc = svc_getnl(argv);	/* procedure number */

	for (progp = serv->sv_program; progp; progp = progp->pg_next)
		if (prog == progp->pg_prog)
			break;

	/*
	 * Decode auth data, and add verifier to reply buffer.
	 * We do this before anything else in order to get a decent
	 * auth verifier.
	 */
	auth_res = svc_authenticate(rqstp, &auth_stat);
	/* Also give the program a chance to reject this call: */
	if (auth_res == SVC_OK && progp) {
		auth_stat = rpc_autherr_badcred;
		auth_res = progp->pg_authenticate(rqstp);
	}
	switch (auth_res) {
	case SVC_OK:
		break;
	case SVC_GARBAGE:
		goto err_garbage;
	case SVC_SYSERR:
		rpc_stat = rpc_system_err;
		goto err_bad;
	case SVC_DENIED:
		goto err_bad_auth;
	case SVC_CLOSE:
		if (test_bit(XPT_TEMP, &rqstp->rq_xprt->xpt_flags))
			svc_close_xprt(rqstp->rq_xprt);
	case SVC_DROP:
		goto dropit;
	case SVC_COMPLETE:
		goto sendit;
	}

	if (progp == NULL)
		goto err_bad_prog;

	if (vers >= progp->pg_nvers ||
	    !(versp = progp->pg_vers[vers]))
		goto err_bad_vers;

	procp = versp->vs_proc + proc;
	if (proc >= versp->vs_nproc || !procp->pc_func)
		goto err_bad_proc;
	rqstp->rq_procinfo = procp;

	/* Syntactic check complete */
	serv->sv_stats->rpccnt++;

	/* Build the reply header. */
	statp = resv->iov_base + resv->iov_len;
	svc_putnl(resv, RPC_SUCCESS);

	/* Bump per-procedure stats counter */
	procp->pc_count++;

	/* Initialize storage for argp and resp */
	memset(rqstp->rq_argp, 0, procp->pc_argsize);
	memset(rqstp->rq_resp, 0, procp->pc_ressize);

	/* un-reserve some of the out-queue now that we have a
	 * better idea of reply size
	 */
	if (procp->pc_xdrressize)
		svc_reserve_auth(rqstp, procp->pc_xdrressize<<2);

	/* Call the function that processes the request. */
	if (!versp->vs_dispatch) {
		/* Decode arguments */
		xdr = procp->pc_decode;
		if (xdr && !xdr(rqstp, argv->iov_base, rqstp->rq_argp))
			goto err_garbage;

		*statp = procp->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp);

		/* Encode reply */
		if (rqstp->rq_dropme) {
			if (procp->pc_release)
				procp->pc_release(rqstp, NULL, rqstp->rq_resp);
			goto dropit;
		}
		if (*statp == rpc_success &&
		    (xdr = procp->pc_encode) &&
		    !xdr(rqstp, resv->iov_base + resv->iov_len, rqstp->rq_resp)) {
			dprintk("svc: failed to encode reply\n");
			/* serv->sv_stats->rpcsystemerr++; */
			*statp = rpc_system_err;
		}
	} else {
		dprintk("svc: calling dispatcher\n");
		if (!versp->vs_dispatch(rqstp, statp)) {
			/* Release reply info */
			if (procp->pc_release)
				procp->pc_release(rqstp, NULL, rqstp->rq_resp);
			goto dropit;
		}
	}

	/* Check RPC status result */
	if (*statp != rpc_success)
		resv->iov_len = ((void*)statp) - resv->iov_base + 4;

	/* Release reply info */
	if (procp->pc_release)
		procp->pc_release(rqstp, NULL, rqstp->rq_resp);

	if (procp->pc_encode == NULL)
		goto dropit;

 sendit:
	if (svc_authorise(rqstp))
		goto dropit;
	return 1;		/* Caller can now send it */

 dropit:
	svc_authorise(rqstp);	/* doesn't hurt to call this twice */
	dprintk("svc: svc_process dropit\n");
	return 0;

err_short_len:
	svc_printk(rqstp, "short len %Zd, dropping request\n",
			argv->iov_len);

	goto dropit;			/* drop request */

err_bad_rpc:
	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, 1);	/* REJECT */
	svc_putnl(resv, 0);	/* RPC_MISMATCH */
	svc_putnl(resv, 2);	/* Only RPCv2 supported */
	svc_putnl(resv, 2);
	goto sendit;

err_bad_auth:
	dprintk("svc: authentication failed (%d)\n", ntohl(auth_stat));
	serv->sv_stats->rpcbadauth++;
	/* Restore write pointer to location of accept status: */
	xdr_ressize_check(rqstp, reply_statp);
	svc_putnl(resv, 1);	/* REJECT */
	svc_putnl(resv, 1);	/* AUTH_ERROR */
	svc_putnl(resv, ntohl(auth_stat));	/* status */
	goto sendit;

err_bad_prog:
	dprintk("svc: unknown program %d\n", prog);
	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, RPC_PROG_UNAVAIL);
	goto sendit;

err_bad_vers:
	svc_printk(rqstp, "unknown version (%d for prog %d, %s)\n",
		       vers, prog, progp->pg_name);

	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, RPC_PROG_MISMATCH);
	svc_putnl(resv, progp->pg_lovers);
	svc_putnl(resv, progp->pg_hivers);
	goto sendit;

err_bad_proc:
	svc_printk(rqstp, "unknown procedure (%d)\n", proc);

	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, RPC_PROC_UNAVAIL);
	goto sendit;

err_garbage:
	svc_printk(rqstp, "failed to decode args\n");

	rpc_stat = rpc_garbage_args;
err_bad:
	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, ntohl(rpc_stat));
	goto sendit;
}
EXPORT_SYMBOL_GPL(svc_process);
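
/*
 * For reference (RFC 5531): the call header consumed on the way into
 * svc_process_common() is a sequence of 32-bit words -- XID and
 * direction (eaten by svc_process() below or the backchannel path),
 * then rpcvers (must be 2), prog, vers and proc, followed by the
 * credential and verifier that svc_authenticate() parses.
 */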
/*
 * Process the RPC request.
 */
int
svc_process(struct svc_rqst *rqstp)
{
	struct kvec		*argv = &rqstp->rq_arg.head[0];
	struct kvec		*resv = &rqstp->rq_res.head[0];
	struct svc_serv		*serv = rqstp->rq_server;
	u32			dir;

	/*
	 * Setup response xdr_buf.
	 * Initially it has just one page
	 */
	rqstp->rq_resused = 1;
	resv->iov_base = page_address(rqstp->rq_respages[0]);
	resv->iov_len = 0;
	rqstp->rq_res.pages = rqstp->rq_respages + 1;
	rqstp->rq_res.len = 0;
	rqstp->rq_res.page_base = 0;
	rqstp->rq_res.page_len = 0;
	rqstp->rq_res.buflen = PAGE_SIZE;
	rqstp->rq_res.tail[0].iov_base = NULL;
	rqstp->rq_res.tail[0].iov_len = 0;

	rqstp->rq_xid = svc_getu32(argv);

	dir = svc_getnl(argv);
	if (dir != 0) {
		/* direction != CALL */
		svc_printk(rqstp, "bad direction %d, dropping request\n", dir);
		serv->sv_stats->rpcbadfmt++;
		svc_drop(rqstp);
		return 0;
	}

	/* Returns 1 for send, 0 for drop */
	if (svc_process_common(rqstp, argv, resv))
		return svc_send(rqstp);
	else {
		svc_drop(rqstp);
		return 0;
	}
}
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
/*
 * Process a backchannel RPC request that arrived over an existing
 * outbound connection
 */
int
bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
	       struct svc_rqst *rqstp)
{
	struct kvec	*argv = &rqstp->rq_arg.head[0];
	struct kvec	*resv = &rqstp->rq_res.head[0];

	/* Build the svc_rqst used by the common processing routine */
	rqstp->rq_xprt = serv->sv_bc_xprt;
	rqstp->rq_xid = req->rq_xid;
	rqstp->rq_prot = req->rq_xprt->prot;
	rqstp->rq_server = serv;

	rqstp->rq_addrlen = sizeof(req->rq_xprt->addr);
	memcpy(&rqstp->rq_addr, &req->rq_xprt->addr, rqstp->rq_addrlen);
	memcpy(&rqstp->rq_arg, &req->rq_rcv_buf, sizeof(rqstp->rq_arg));
	memcpy(&rqstp->rq_res, &req->rq_snd_buf, sizeof(rqstp->rq_res));

	/* reset result send buffer "put" position */
	resv->iov_len = 0;

	if (rqstp->rq_prot != IPPROTO_TCP) {
		printk(KERN_ERR "No support for Non-TCP transports!\n");
		BUG();
	}

	/*
	 * Skip the next two words because they've already been
	 * processed in the transport
	 */
	svc_getu32(argv);	/* XID */
	svc_getnl(argv);	/* CALLDIR */

	/* Returns 1 for send, 0 for drop */
	if (svc_process_common(rqstp, argv, resv)) {
		memcpy(&req->rq_snd_buf, &rqstp->rq_res,
						sizeof(req->rq_snd_buf));
		return bc_send(req);
	} else {
		/* drop request */
		xprt_free_bc_request(req);
		return 0;
	}
}
EXPORT_SYMBOL_GPL(bc_svc_process);
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
/*
 * Return (transport-specific) limit on the rpc payload.
 */
u32 svc_max_payload(const struct svc_rqst *rqstp)
{
	u32 max = rqstp->rq_xprt->xpt_class->xcl_max_payload;

	if (rqstp->rq_server->sv_max_payload < max)
		max = rqstp->rq_server->sv_max_payload;
	return max;
}
EXPORT_SYMBOL_GPL(svc_max_payload);