/*
 * linux/net/sunrpc/svc.c
 *
 * High-level RPC service routines
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 *
 * Multiple threads pools and NUMAisation
 * Copyright (c) 2006 Silicon Graphics, Inc.
 * by Greg Banks <gnb@melbourne.sgi.com>
 */
#include <linux/linkage.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/slab.h>

#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/bc_xprt.h>

#define RPCDBG_FACILITY	RPCDBG_SVCDSP
static void svc_unregister(const struct svc_serv *serv);

#define svc_serv_is_pooled(serv)    ((serv)->sv_function)
/*
 * Mode for mapping cpus to pools.
 */
enum {
        SVC_POOL_AUTO = -1,     /* choose one of the others */
        SVC_POOL_GLOBAL,        /* no mapping, just a single global pool
                                 * (legacy & UP mode) */
        SVC_POOL_PERCPU,        /* one pool per cpu */
        SVC_POOL_PERNODE        /* one pool per numa node */
};
#define SVC_POOL_DEFAULT	SVC_POOL_GLOBAL
/*
 * Structure for mapping cpus to pools and vice versa.
 * Setup once during sunrpc initialisation.
 */
static struct svc_pool_map {
        int count;                      /* How many svc_servs use us */
        int mode;                       /* Note: int not enum to avoid
                                         * warnings about "enumeration value
                                         * not handled in switch" */
        unsigned int npools;
        unsigned int *pool_to;          /* maps pool id to cpu or node */
        unsigned int *to_pool;          /* maps cpu or node to pool id */
} svc_pool_map = {
        .count = 0,
        .mode = SVC_POOL_DEFAULT
};
static DEFINE_MUTEX(svc_pool_map_mutex);/* protects svc_pool_map.count only */
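
/*
 * Worked example (editorial note, not in the original source): in
 * SVC_POOL_PERNODE mode on a two-node machine the init code below
 * fills the arrays as
 *
 *	to_pool[0] = 0, to_pool[1] = 1	(node -> pool id)
 *	pool_to[0] = 0, pool_to[1] = 1	(pool id -> node)
 *
 * i.e. the two arrays are inverses of each other, with to_pool[]
 * indexed by cpu or node depending on the mode.
 */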
static int
param_set_pool_mode(const char *val, struct kernel_param *kp)
{
        int *ip = (int *)kp->arg;
        struct svc_pool_map *m = &svc_pool_map;
        int err;

        mutex_lock(&svc_pool_map_mutex);

        err = -EBUSY;
        if (m->count)
                goto out;

        err = 0;
        if (!strncmp(val, "auto", 4))
                *ip = SVC_POOL_AUTO;
        else if (!strncmp(val, "global", 6))
                *ip = SVC_POOL_GLOBAL;
        else if (!strncmp(val, "percpu", 6))
                *ip = SVC_POOL_PERCPU;
        else if (!strncmp(val, "pernode", 7))
                *ip = SVC_POOL_PERNODE;
        else
                err = -EINVAL;

out:
        mutex_unlock(&svc_pool_map_mutex);
        return err;
}
static int
param_get_pool_mode(char *buf, struct kernel_param *kp)
{
        int *ip = (int *)kp->arg;

        switch (*ip)
        {
        case SVC_POOL_AUTO:
                return strlcpy(buf, "auto", 20);
        case SVC_POOL_GLOBAL:
                return strlcpy(buf, "global", 20);
        case SVC_POOL_PERCPU:
                return strlcpy(buf, "percpu", 20);
        case SVC_POOL_PERNODE:
                return strlcpy(buf, "pernode", 20);
        default:
                return sprintf(buf, "%d", *ip);
        }
}
module_param_call(pool_mode, param_set_pool_mode, param_get_pool_mode,
                 &svc_pool_map.mode, 0644);
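
/*
 * Usage sketch (editorial note): with the 0644 permissions above, the
 * pool mode can be chosen at module load time or changed at runtime
 * while no pooled service holds a map reference, e.g.
 *
 *	# modprobe sunrpc pool_mode=pernode
 * or
 *	# echo pernode > /sys/module/sunrpc/parameters/pool_mode
 *
 * param_set_pool_mode() above refuses the change with -EBUSY once
 * svc_pool_map.count is non-zero.
 */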
/*
 * Detect best pool mapping mode heuristically,
 * according to the machine's topology.
 */
static int
svc_pool_map_choose_mode(void)
{
        unsigned int node;

        if (nr_online_nodes > 1) {
                /*
                 * Actually have multiple NUMA nodes,
                 * so split pools on NUMA node boundaries
                 */
                return SVC_POOL_PERNODE;
        }

        node = first_online_node;
        if (nr_cpus_node(node) > 2) {
                /*
                 * Non-trivial SMP, or CONFIG_NUMA on
                 * non-NUMA hardware, e.g. with a generic
                 * x86_64 kernel on Xeons.  In this case we
                 * want to divide the pools on cpu boundaries.
                 */
                return SVC_POOL_PERCPU;
        }

        /* default: one global pool */
        return SVC_POOL_GLOBAL;
}
/*
 * Allocate the to_pool[] and pool_to[] arrays.
 * Returns 0 on success or an errno.
 */
static int
svc_pool_map_alloc_arrays(struct svc_pool_map *m, unsigned int maxpools)
{
        m->to_pool = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
        if (!m->to_pool)
                goto fail;
        m->pool_to = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
        if (!m->pool_to)
                goto fail_free;

        return 0;

fail_free:
        kfree(m->to_pool);
        m->to_pool = NULL;
fail:
        return -ENOMEM;
}
/*
 * Initialise the pool map for SVC_POOL_PERCPU mode.
 * Returns number of pools or <0 on error.
 */
static int
svc_pool_map_init_percpu(struct svc_pool_map *m)
{
        unsigned int maxpools = nr_cpu_ids;
        unsigned int pidx = 0;
        unsigned int cpu;
        int err;

        err = svc_pool_map_alloc_arrays(m, maxpools);
        if (err)
                return err;

        for_each_online_cpu(cpu) {
                BUG_ON(pidx > maxpools);
                m->to_pool[cpu] = pidx;
                m->pool_to[pidx] = cpu;
                pidx++;
        }
        /* cpus brought online later all get mapped to pool0, sorry */

        return pidx;
}
/*
 * Initialise the pool map for SVC_POOL_PERNODE mode.
 * Returns number of pools or <0 on error.
 */
static int
svc_pool_map_init_pernode(struct svc_pool_map *m)
{
        unsigned int maxpools = nr_node_ids;
        unsigned int pidx = 0;
        unsigned int node;
        int err;

        err = svc_pool_map_alloc_arrays(m, maxpools);
        if (err)
                return err;

        for_each_node_with_cpus(node) {
                /* some architectures (e.g. SN2) have cpuless nodes */
                BUG_ON(pidx > maxpools);
                m->to_pool[node] = pidx;
                m->pool_to[pidx] = node;
                pidx++;
        }
        /* nodes brought online later all get mapped to pool0, sorry */

        return pidx;
}
/*
 * Add a reference to the global map of cpus to pools (and
 * vice versa).  Initialise the map if we're the first user.
 * Returns the number of pools.
 */
static unsigned int
svc_pool_map_get(void)
{
        struct svc_pool_map *m = &svc_pool_map;
        int npools = -1;

        mutex_lock(&svc_pool_map_mutex);

        if (m->count++) {
                mutex_unlock(&svc_pool_map_mutex);
                return m->npools;
        }

        if (m->mode == SVC_POOL_AUTO)
                m->mode = svc_pool_map_choose_mode();

        switch (m->mode) {
        case SVC_POOL_PERCPU:
                npools = svc_pool_map_init_percpu(m);
                break;
        case SVC_POOL_PERNODE:
                npools = svc_pool_map_init_pernode(m);
                break;
        }

        if (npools < 0) {
                /* default, or memory allocation failure */
                npools = 1;
                m->mode = SVC_POOL_GLOBAL;
        }
        m->npools = npools;

        mutex_unlock(&svc_pool_map_mutex);
        return m->npools;
}
/*
 * Drop a reference to the global map of cpus to pools.
 * When the last reference is dropped, the map data is
 * freed; this allows the sysadmin to change the pool
 * mode using the pool_mode module option without
 * rebooting or re-loading sunrpc.ko.
 */
static void
svc_pool_map_put(void)
{
        struct svc_pool_map *m = &svc_pool_map;

        mutex_lock(&svc_pool_map_mutex);

        if (!--m->count) {
                m->mode = SVC_POOL_DEFAULT;
                kfree(m->to_pool);
                m->to_pool = NULL;
                kfree(m->pool_to);
                m->pool_to = NULL;
                m->npools = 0;
        }

        mutex_unlock(&svc_pool_map_mutex);
}
static int svc_pool_map_get_node(unsigned int pidx)
{
        const struct svc_pool_map *m = &svc_pool_map;

        if (m->count) {
                if (m->mode == SVC_POOL_PERCPU)
                        return cpu_to_node(m->pool_to[pidx]);
                if (m->mode == SVC_POOL_PERNODE)
                        return m->pool_to[pidx];
        }
        return NUMA_NO_NODE;
}
/*
 * Set the given thread's cpus_allowed mask so that it
 * will only run on cpus in the given pool.
 */
static inline void
svc_pool_map_set_cpumask(struct task_struct *task, unsigned int pidx)
{
        struct svc_pool_map *m = &svc_pool_map;
        unsigned int node = m->pool_to[pidx];

        /*
         * The caller checks for sv_nrpools > 1, which
         * implies that we've been initialized.
         */
        BUG_ON(m->count == 0);

        switch (m->mode) {
        case SVC_POOL_PERCPU:
        {
                set_cpus_allowed_ptr(task, cpumask_of(node));
                break;
        }
        case SVC_POOL_PERNODE:
        {
                set_cpus_allowed_ptr(task, cpumask_of_node(node));
                break;
        }
        }
}
/*
 * Use the mapping mode to choose a pool for a given CPU.
 * Used when enqueueing an incoming RPC.  Always returns
 * a non-NULL pool pointer.
 */
struct svc_pool *
svc_pool_for_cpu(struct svc_serv *serv, int cpu)
{
        struct svc_pool_map *m = &svc_pool_map;
        unsigned int pidx = 0;

        /*
         * An uninitialised map happens in a pure client when
         * lockd is brought up, so silently treat it the
         * same as SVC_POOL_GLOBAL.
         */
        if (svc_serv_is_pooled(serv)) {
                switch (m->mode) {
                case SVC_POOL_PERCPU:
                        pidx = m->to_pool[cpu];
                        break;
                case SVC_POOL_PERNODE:
                        pidx = m->to_pool[cpu_to_node(cpu)];
                        break;
                }
        }
        return &serv->sv_pools[pidx % serv->sv_nrpools];
}
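
/*
 * Example caller (editorial sketch): the transport enqueueing code
 * picks a pool for the cpu that took the interrupt, roughly
 *
 *	cpu = get_cpu();
 *	pool = svc_pool_for_cpu(xprt->xpt_server, cpu);
 *	put_cpu();
 *
 * which is how svc_xprt_enqueue() uses this helper.
 */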
static int svc_rpcb_setup(struct svc_serv *serv)
{
        int err;

        err = rpcb_create_local();
        if (err)
                return err;

        /* Remove any stale portmap registrations */
        svc_unregister(serv);
        return 0;
}

void svc_rpcb_cleanup(struct svc_serv *serv)
{
        svc_unregister(serv);
        rpcb_put_local();
}
EXPORT_SYMBOL_GPL(svc_rpcb_cleanup);
static int svc_uses_rpcbind(struct svc_serv *serv)
{
        struct svc_program      *progp;
        unsigned int            i;

        for (progp = serv->sv_program; progp; progp = progp->pg_next) {
                for (i = 0; i < progp->pg_nvers; i++) {
                        if (progp->pg_vers[i] == NULL)
                                continue;
                        if (progp->pg_vers[i]->vs_hidden == 0)
                                return 1;
                }
        }

        return 0;
}
/*
 * Create an RPC service
 */
static struct svc_serv *
__svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
             void (*shutdown)(struct svc_serv *serv))
{
        struct svc_serv *serv;
        unsigned int vers;
        unsigned int xdrsize;
        unsigned int i;

        if (!(serv = kzalloc(sizeof(*serv), GFP_KERNEL)))
                return NULL;
        serv->sv_name      = prog->pg_name;
        serv->sv_program   = prog;
        serv->sv_nrthreads = 1;
        serv->sv_stats     = prog->pg_stats;
        if (bufsize > RPCSVC_MAXPAYLOAD)
                bufsize = RPCSVC_MAXPAYLOAD;
        serv->sv_max_payload = bufsize ? bufsize : 4096;
        serv->sv_max_mesg  = roundup(serv->sv_max_payload + PAGE_SIZE, PAGE_SIZE);
        serv->sv_shutdown  = shutdown;
        xdrsize = 0;
        while (prog) {
                prog->pg_lovers = prog->pg_nvers-1;
                for (vers = 0; vers < prog->pg_nvers; vers++)
                        if (prog->pg_vers[vers]) {
                                prog->pg_hivers = vers;
                                if (prog->pg_lovers > vers)
                                        prog->pg_lovers = vers;
                                if (prog->pg_vers[vers]->vs_xdrsize > xdrsize)
                                        xdrsize = prog->pg_vers[vers]->vs_xdrsize;
                        }
                prog = prog->pg_next;
        }
        serv->sv_xdrsize   = xdrsize;
        INIT_LIST_HEAD(&serv->sv_tempsocks);
        INIT_LIST_HEAD(&serv->sv_permsocks);
        init_timer(&serv->sv_temptimer);
        spin_lock_init(&serv->sv_lock);

        serv->sv_nrpools = npools;
        serv->sv_pools =
                kcalloc(serv->sv_nrpools, sizeof(struct svc_pool),
                        GFP_KERNEL);
        if (!serv->sv_pools) {
                kfree(serv);
                return NULL;
        }

        for (i = 0; i < serv->sv_nrpools; i++) {
                struct svc_pool *pool = &serv->sv_pools[i];

                dprintk("svc: initialising pool %u for %s\n",
                                i, serv->sv_name);

                pool->sp_id = i;
                INIT_LIST_HEAD(&pool->sp_threads);
                INIT_LIST_HEAD(&pool->sp_sockets);
                INIT_LIST_HEAD(&pool->sp_all_threads);
                spin_lock_init(&pool->sp_lock);
        }

        if (svc_uses_rpcbind(serv)) {
                if (svc_rpcb_setup(serv) < 0) {
                        kfree(serv->sv_pools);
                        kfree(serv);
                        return NULL;
                }
                if (!serv->sv_shutdown)
                        serv->sv_shutdown = svc_rpcb_cleanup;
        }

        return serv;
}
struct svc_serv *
svc_create(struct svc_program *prog, unsigned int bufsize,
           void (*shutdown)(struct svc_serv *serv))
{
        return __svc_create(prog, bufsize, /*npools*/1, shutdown);
}
EXPORT_SYMBOL_GPL(svc_create);
struct svc_serv *
svc_create_pooled(struct svc_program *prog, unsigned int bufsize,
                  void (*shutdown)(struct svc_serv *serv),
                  svc_thread_fn func, struct module *mod)
{
        struct svc_serv *serv;
        unsigned int npools = svc_pool_map_get();

        serv = __svc_create(prog, bufsize, npools, shutdown);

        if (serv != NULL) {
                serv->sv_function = func;
                serv->sv_module = mod;
        }

        return serv;
}
EXPORT_SYMBOL_GPL(svc_create_pooled);
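
/*
 * Example (editorial sketch): nfsd creates its pooled service roughly
 * like this, passing the thread function and owning module so that
 * svc_set_num_threads() below can spawn kthreads:
 *
 *	serv = svc_create_pooled(&nfsd_program, nfsd_max_blksize,
 *				 nfsd_last_thread, nfsd, THIS_MODULE);
 *	if (serv == NULL)
 *		return -ENOMEM;
 *
 * Single-pool services such as lockd use plain svc_create() instead.
 */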
/*
 * Destroy an RPC service. Should be called with appropriate locking to
 * protect the sv_nrthreads, sv_permsocks and sv_tempsocks.
 */
void
svc_destroy(struct svc_serv *serv)
{
        dprintk("svc: svc_destroy(%s, %d)\n",
                                serv->sv_program->pg_name,
                                serv->sv_nrthreads);

        if (serv->sv_nrthreads) {
                if (--(serv->sv_nrthreads) != 0) {
                        svc_sock_update_bufs(serv);
                        return;
                }
        } else
                printk("svc_destroy: no threads for serv=%p!\n", serv);

        del_timer_sync(&serv->sv_temptimer);

        svc_close_all(&serv->sv_tempsocks);

        if (serv->sv_shutdown)
                serv->sv_shutdown(serv);

        svc_close_all(&serv->sv_permsocks);

        BUG_ON(!list_empty(&serv->sv_permsocks));
        BUG_ON(!list_empty(&serv->sv_tempsocks));

        cache_clean_deferred(serv);

        if (svc_serv_is_pooled(serv))
                svc_pool_map_put();

        kfree(serv->sv_pools);
        kfree(serv);
}
EXPORT_SYMBOL_GPL(svc_destroy);
/*
 * Allocate an RPC server's buffer space.
 * We allocate pages and place them in rq_argpages.
 */
static int
svc_init_buffer(struct svc_rqst *rqstp, unsigned int size, int node)
{
        unsigned int pages, arghi;

        /* bc_xprt uses fore channel allocated buffers */
        if (svc_is_backchannel(rqstp))
                return 1;

        pages = size / PAGE_SIZE + 1; /* extra page as we hold both request and reply.
                                       * We assume one is at most one page
                                       */
        arghi = 0;
        BUG_ON(pages > RPCSVC_MAXPAGES);
        while (pages) {
                struct page *p = alloc_pages_node(node, GFP_KERNEL, 0);
                if (!p)
                        break;
                rqstp->rq_pages[arghi++] = p;
                pages--;
        }
        return pages == 0;
}
/*
 * Release an RPC server buffer
 */
static void
svc_release_buffer(struct svc_rqst *rqstp)
{
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(rqstp->rq_pages); i++)
                if (rqstp->rq_pages[i])
                        put_page(rqstp->rq_pages[i]);
}
struct svc_rqst *
svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
{
        struct svc_rqst *rqstp;

        rqstp = kzalloc_node(sizeof(*rqstp), GFP_KERNEL, node);
        if (!rqstp)
                goto out_enomem;

        init_waitqueue_head(&rqstp->rq_wait);

        serv->sv_nrthreads++;
        spin_lock_bh(&pool->sp_lock);
        pool->sp_nrthreads++;
        list_add(&rqstp->rq_all, &pool->sp_all_threads);
        spin_unlock_bh(&pool->sp_lock);
        rqstp->rq_server = serv;
        rqstp->rq_pool = pool;

        rqstp->rq_argp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
        if (!rqstp->rq_argp)
                goto out_thread;

        rqstp->rq_resp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
        if (!rqstp->rq_resp)
                goto out_thread;

        if (!svc_init_buffer(rqstp, serv->sv_max_mesg, node))
                goto out_thread;

        return rqstp;
out_thread:
        svc_exit_thread(rqstp);
out_enomem:
        return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(svc_prepare_thread);
/*
 * Choose a pool in which to create a new thread, for svc_set_num_threads
 */
static inline struct svc_pool *
choose_pool(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
{
        if (pool != NULL)
                return pool;

        return &serv->sv_pools[(*state)++ % serv->sv_nrpools];
}
/*
 * Choose a thread to kill, for svc_set_num_threads
 */
static inline struct task_struct *
choose_victim(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
{
        unsigned int i;
        struct task_struct *task = NULL;

        if (pool != NULL) {
                spin_lock_bh(&pool->sp_lock);
        } else {
                /* choose a pool in round-robin fashion */
                for (i = 0; i < serv->sv_nrpools; i++) {
                        pool = &serv->sv_pools[--(*state) % serv->sv_nrpools];
                        spin_lock_bh(&pool->sp_lock);
                        if (!list_empty(&pool->sp_all_threads))
                                goto found_pool;
                        spin_unlock_bh(&pool->sp_lock);
                }
                return NULL;
        }

found_pool:
        if (!list_empty(&pool->sp_all_threads)) {
                struct svc_rqst *rqstp;

                /*
                 * Remove from the pool->sp_all_threads list
                 * so we don't try to kill it again.
                 */
                rqstp = list_entry(pool->sp_all_threads.next, struct svc_rqst, rq_all);
                list_del_init(&rqstp->rq_all);
                task = rqstp->rq_task;
        }
        spin_unlock_bh(&pool->sp_lock);

        return task;
}
/*
 * Create or destroy enough new threads to make the number
 * of threads the given number.  If `pool' is non-NULL, applies
 * only to threads in that pool, otherwise round-robins between
 * all pools.  Must be called with a svc_get() reference and
 * the BKL or another lock to protect access to svc_serv fields.
 *
 * Destroying threads relies on the service threads filling in
 * rqstp->rq_task, which only the nfs ones do.  Assumes the serv
 * has been created using svc_create_pooled().
 *
 * Based on code that used to be in nfsd_svc() but tweaked
 * to be pool-aware.
 */
int
svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
        struct svc_rqst *rqstp;
        struct task_struct *task;
        struct svc_pool *chosen_pool;
        int error = 0;
        unsigned int state = serv->sv_nrthreads-1;
        int node;

        if (pool == NULL) {
                /* The -1 assumes caller has done a svc_get() */
                nrservs -= (serv->sv_nrthreads-1);
        } else {
                spin_lock_bh(&pool->sp_lock);
                nrservs -= pool->sp_nrthreads;
                spin_unlock_bh(&pool->sp_lock);
        }

        /* create new threads */
        while (nrservs > 0) {
                nrservs--;
                chosen_pool = choose_pool(serv, pool, &state);

                node = svc_pool_map_get_node(chosen_pool->sp_id);
                rqstp = svc_prepare_thread(serv, chosen_pool, node);
                if (IS_ERR(rqstp)) {
                        error = PTR_ERR(rqstp);
                        break;
                }

                __module_get(serv->sv_module);
                task = kthread_create_on_node(serv->sv_function, rqstp,
                                              node, serv->sv_name);
                if (IS_ERR(task)) {
                        error = PTR_ERR(task);
                        module_put(serv->sv_module);
                        svc_exit_thread(rqstp);
                        break;
                }

                rqstp->rq_task = task;
                if (serv->sv_nrpools > 1)
                        svc_pool_map_set_cpumask(task, chosen_pool->sp_id);

                svc_sock_update_bufs(serv);
                wake_up_process(task);
        }
        /* destroy old threads */
        while (nrservs < 0 &&
               (task = choose_victim(serv, pool, &state)) != NULL) {
                send_sig(SIGINT, task, 1);
                nrservs++;
        }

        return error;
}
EXPORT_SYMBOL_GPL(svc_set_num_threads);
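
/*
 * Usage sketch (editorial note): nfsd adjusts its global thread count
 * with a NULL pool, relying on the round-robin pool choice above:
 *
 *	error = svc_set_num_threads(nfsd_serv, NULL, nrservs);
 *
 * Passing a specific &serv->sv_pools[i] instead resizes only that
 * pool, which is how per-pool thread counts are managed.
 */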
/*
 * Called from a server thread as it's exiting. Caller must hold the BKL or
 * the "service mutex", whichever is appropriate for the service.
 */
void
svc_exit_thread(struct svc_rqst *rqstp)
{
        struct svc_serv *serv = rqstp->rq_server;
        struct svc_pool *pool = rqstp->rq_pool;

        svc_release_buffer(rqstp);
        kfree(rqstp->rq_resp);
        kfree(rqstp->rq_argp);
        kfree(rqstp->rq_auth_data);

        spin_lock_bh(&pool->sp_lock);
        pool->sp_nrthreads--;
        list_del(&rqstp->rq_all);
        spin_unlock_bh(&pool->sp_lock);

        kfree(rqstp);

        /* Release the server */
        if (serv)
                svc_destroy(serv);
}
EXPORT_SYMBOL_GPL(svc_exit_thread);
/*
 * Register an "inet" protocol family netid with the local
 * rpcbind daemon via an rpcbind v4 SET request.
 *
 * No netconfig infrastructure is available in the kernel, so
 * we map IP_ protocol numbers to netids by hand.
 *
 * Returns zero on success; a negative errno value is returned
 * if any error occurs.
 */
static int __svc_rpcb_register4(const u32 program, const u32 version,
                                const unsigned short protocol,
                                const unsigned short port)
{
        const struct sockaddr_in sin = {
                .sin_family             = AF_INET,
                .sin_addr.s_addr        = htonl(INADDR_ANY),
                .sin_port               = htons(port),
        };
        const char *netid;
        int error;

        switch (protocol) {
        case IPPROTO_UDP:
                netid = RPCBIND_NETID_UDP;
                break;
        case IPPROTO_TCP:
                netid = RPCBIND_NETID_TCP;
                break;
        default:
                return -ENOPROTOOPT;
        }

        error = rpcb_v4_register(program, version,
                                 (const struct sockaddr *)&sin, netid);

        /*
         * User space didn't support rpcbind v4, so retry this
         * registration request with the legacy rpcbind v2 protocol.
         */
        if (error == -EPROTONOSUPPORT)
                error = rpcb_register(program, version, protocol, port);

        return error;
}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
/*
 * Register an "inet6" protocol family netid with the local
 * rpcbind daemon via an rpcbind v4 SET request.
 *
 * No netconfig infrastructure is available in the kernel, so
 * we map IP_ protocol numbers to netids by hand.
 *
 * Returns zero on success; a negative errno value is returned
 * if any error occurs.
 */
static int __svc_rpcb_register6(const u32 program, const u32 version,
                                const unsigned short protocol,
                                const unsigned short port)
{
        const struct sockaddr_in6 sin6 = {
                .sin6_family            = AF_INET6,
                .sin6_addr              = IN6ADDR_ANY_INIT,
                .sin6_port              = htons(port),
        };
        const char *netid;
        int error;

        switch (protocol) {
        case IPPROTO_UDP:
                netid = RPCBIND_NETID_UDP6;
                break;
        case IPPROTO_TCP:
                netid = RPCBIND_NETID_TCP6;
                break;
        default:
                return -ENOPROTOOPT;
        }

        error = rpcb_v4_register(program, version,
                                 (const struct sockaddr *)&sin6, netid);

        /*
         * User space didn't support rpcbind version 4, so we won't
         * use a PF_INET6 listener.
         */
        if (error == -EPROTONOSUPPORT)
                error = -EAFNOSUPPORT;

        return error;
}
#endif	/* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
/*
 * Register a kernel RPC service via rpcbind version 4.
 *
 * Returns zero on success; a negative errno value is returned
 * if any error occurs.
 */
static int __svc_register(const char *progname,
                          const u32 program, const u32 version,
                          const int family,
                          const unsigned short protocol,
                          const unsigned short port)
{
        int error = -EAFNOSUPPORT;

        switch (family) {
        case PF_INET:
                error = __svc_rpcb_register4(program, version,
                                                protocol, port);
                break;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        case PF_INET6:
                error = __svc_rpcb_register6(program, version,
                                                protocol, port);
#endif	/* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
        }

        if (error < 0)
                printk(KERN_WARNING "svc: failed to register %sv%u RPC "
                        "service (errno %d).\n", progname, version, -error);
        return error;
}
/**
 * svc_register - register an RPC service with the local portmapper
 * @serv: svc_serv struct for the service to register
 * @family: protocol family of service's listener socket
 * @proto: transport protocol number to advertise
 * @port: port to advertise
 *
 * Service is registered for any address in the passed-in protocol family
 */
int svc_register(const struct svc_serv *serv, const int family,
                 const unsigned short proto, const unsigned short port)
{
        struct svc_program      *progp;
        unsigned int            i;
        int                     error = 0;

        BUG_ON(proto == 0 && port == 0);

        for (progp = serv->sv_program; progp; progp = progp->pg_next) {
                for (i = 0; i < progp->pg_nvers; i++) {
                        if (progp->pg_vers[i] == NULL)
                                continue;

                        dprintk("svc: svc_register(%sv%d, %s, %u, %u)%s\n",
                                        progp->pg_name,
                                        i,
                                        proto == IPPROTO_UDP ? "udp" : "tcp",
                                        port,
                                        family,
                                        progp->pg_vers[i]->vs_hidden ?
                                                " (but not telling portmap)" : "");

                        if (progp->pg_vers[i]->vs_hidden)
                                continue;

                        error = __svc_register(progp->pg_name, progp->pg_prog,
                                                i, family, proto, port);
                        if (error < 0)
                                break;
                }
        }

        return error;
}
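
/*
 * Example (editorial sketch): a transport setup path that has just
 * bound a listener would typically advertise it with
 *
 *	error = svc_register(serv, PF_INET, IPPROTO_TCP, port);
 *
 * letting the loop above skip any vs_hidden program versions.
 */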
/*
 * If user space is running rpcbind, it should take the v4 UNSET
 * and clear everything for this [program, version].  If user space
 * is running portmap, it will reject the v4 UNSET, but won't have
 * any "inet6" entries anyway.  So a PMAP_UNSET should be sufficient
 * in this case to clear all existing entries for [program, version].
 */
static void __svc_unregister(const u32 program, const u32 version,
                             const char *progname)
{
        int error;

        error = rpcb_v4_register(program, version, NULL, "");

        /*
         * User space didn't support rpcbind v4, so retry this
         * request with the legacy rpcbind v2 protocol.
         */
        if (error == -EPROTONOSUPPORT)
                error = rpcb_register(program, version, 0, 0);

        dprintk("svc: %s(%sv%u), error %d\n",
                        __func__, progname, version, error);
}
/*
 * All netids, bind addresses and ports registered for [program, version]
 * are removed from the local rpcbind database (if the service is not
 * hidden) to make way for a new instance of the service.
 *
 * The result of unregistration is reported via dprintk for those who want
 * verification of the result, but is otherwise not important.
 */
static void svc_unregister(const struct svc_serv *serv)
{
        struct svc_program *progp;
        unsigned long flags;
        unsigned int i;

        clear_thread_flag(TIF_SIGPENDING);

        for (progp = serv->sv_program; progp; progp = progp->pg_next) {
                for (i = 0; i < progp->pg_nvers; i++) {
                        if (progp->pg_vers[i] == NULL)
                                continue;
                        if (progp->pg_vers[i]->vs_hidden)
                                continue;

                        dprintk("svc: attempting to unregister %sv%u\n",
                                progp->pg_name, i);
                        __svc_unregister(progp->pg_prog, i, progp->pg_name);
                }
        }

        spin_lock_irqsave(&current->sighand->siglock, flags);
        recalc_sigpending();
        spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
/*
 * Printk the given error with the address of the client that caused it.
 */
static __printf(2, 3)
int svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
{
        va_list args;
        int     r;
        char    buf[RPC_MAX_ADDRBUFLEN];

        if (!net_ratelimit())
                return 0;

        printk(KERN_WARNING "svc: %s: ",
                svc_print_addr(rqstp, buf, sizeof(buf)));

        va_start(args, fmt);
        r = vprintk(fmt, args);
        va_end(args);

        return r;
}
/*
 * Common routine for processing the RPC request.
 */
static int
svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
{
        struct svc_program      *progp;
        struct svc_version      *versp = NULL; /* compiler food */
        struct svc_procedure    *procp = NULL;
        struct svc_serv         *serv = rqstp->rq_server;
        kxdrproc_t              xdr;
        __be32                  *statp;
        u32                     prog, vers, proc;
        __be32                  auth_stat, rpc_stat;
        int                     auth_res;
        __be32                  *reply_statp;
        rpc_stat = rpc_success;

        if (argv->iov_len < 6*4)
                goto err_short_len;

        /* Will be turned off only in gss privacy case: */
        rqstp->rq_splice_ok = 1;
        /* Will be turned off only when NFSv4 Sessions are used */
        rqstp->rq_usedeferral = 1;
        rqstp->rq_dropme = false;

        /* Setup reply header */
        rqstp->rq_xprt->xpt_ops->xpo_prep_reply_hdr(rqstp);

        svc_putu32(resv, rqstp->rq_xid);

        vers = svc_getnl(argv);

        /* First words of reply: */
        svc_putnl(resv, 1);             /* REPLY */

        if (vers != 2)          /* RPC version number */
                goto err_bad_rpc;

        /* Save position in case we later decide to reject: */
        reply_statp = resv->iov_base + resv->iov_len;

        svc_putnl(resv, 0);             /* ACCEPT */

        rqstp->rq_prog = prog = svc_getnl(argv);        /* program number */
        rqstp->rq_vers = vers = svc_getnl(argv);        /* version number */
        rqstp->rq_proc = proc = svc_getnl(argv);        /* procedure number */

        progp = serv->sv_program;

        for (progp = serv->sv_program; progp; progp = progp->pg_next)
                if (prog == progp->pg_prog)
                        break;
        /*
         * Decode auth data, and add verifier to reply buffer.
         * We do this before anything else in order to get a decent
         * auth verifier.
         */
        auth_res = svc_authenticate(rqstp, &auth_stat);
        /* Also give the program a chance to reject this call: */
        if (auth_res == SVC_OK && progp) {
                auth_stat = rpc_autherr_badcred;
                auth_res = progp->pg_authenticate(rqstp);
        }
        switch (auth_res) {
        case SVC_OK:
                break;
        case SVC_GARBAGE:
                goto err_garbage;
        case SVC_SYSERR:
                rpc_stat = rpc_system_err;
                goto err_bad;
        case SVC_DENIED:
                goto err_bad_auth;
        case SVC_CLOSE:
                if (test_bit(XPT_TEMP, &rqstp->rq_xprt->xpt_flags))
                        svc_close_xprt(rqstp->rq_xprt);
        case SVC_DROP:
                goto dropit;
        case SVC_COMPLETE:
                goto sendit;
        }

        if (progp == NULL)
                goto err_bad_prog;
        if (vers >= progp->pg_nvers ||
          !(versp = progp->pg_vers[vers]))
                goto err_bad_vers;

        procp = versp->vs_proc + proc;
        if (proc >= versp->vs_nproc || !procp->pc_func)
                goto err_bad_proc;
        rqstp->rq_procinfo = procp;

        /* Syntactic check complete */
        serv->sv_stats->rpccnt++;

        /* Build the reply header. */
        statp = resv->iov_base + resv->iov_len;
        svc_putnl(resv, RPC_SUCCESS);

        /* Bump per-procedure stats counter */
        procp->pc_count++;

        /* Initialize storage for argp and resp */
        memset(rqstp->rq_argp, 0, procp->pc_argsize);
        memset(rqstp->rq_resp, 0, procp->pc_ressize);

        /* un-reserve some of the out-queue now that we have a
         * better idea of reply size
         */
        if (procp->pc_xdrressize)
                svc_reserve_auth(rqstp, procp->pc_xdrressize<<2);
        /* Call the function that processes the request. */
        if (!versp->vs_dispatch) {
                /* Decode arguments */
                xdr = procp->pc_decode;
                if (xdr && !xdr(rqstp, argv->iov_base, rqstp->rq_argp))
                        goto err_garbage;

                *statp = procp->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp);

                /* Encode reply */
                if (rqstp->rq_dropme) {
                        if (procp->pc_release)
                                procp->pc_release(rqstp, NULL, rqstp->rq_resp);
                        goto dropit;
                }
                if (*statp == rpc_success &&
                    (xdr = procp->pc_encode) &&
                    !xdr(rqstp, resv->iov_base+resv->iov_len, rqstp->rq_resp)) {
                        dprintk("svc: failed to encode reply\n");
                        /* serv->sv_stats->rpcsystemerr++; */
                        *statp = rpc_system_err;
                }
        } else {
                dprintk("svc: calling dispatcher\n");
                if (!versp->vs_dispatch(rqstp, statp)) {
                        /* Release reply info */
                        if (procp->pc_release)
                                procp->pc_release(rqstp, NULL, rqstp->rq_resp);
                        goto dropit;
                }
        }
        /* Check RPC status result */
        if (*statp != rpc_success)
                resv->iov_len = ((void*)statp) - resv->iov_base + 4;

        /* Release reply info */
        if (procp->pc_release)
                procp->pc_release(rqstp, NULL, rqstp->rq_resp);

        if (procp->pc_encode == NULL)
                goto dropit;

 sendit:
        if (svc_authorise(rqstp))
                goto dropit;
        return 1;               /* Caller can now send it */

 dropit:
        svc_authorise(rqstp);   /* doesn't hurt to call this twice */
        dprintk("svc: svc_process dropit\n");
        return 0;

err_short_len:
        svc_printk(rqstp, "short len %Zd, dropping request\n",
                        argv->iov_len);

        goto dropit;                    /* drop request */
err_bad_rpc:
        serv->sv_stats->rpcbadfmt++;
        svc_putnl(resv, 1);     /* REJECT */
        svc_putnl(resv, 0);     /* RPC_MISMATCH */
        svc_putnl(resv, 2);     /* Only RPCv2 supported */
        svc_putnl(resv, 2);
        goto sendit;

err_bad_auth:
        dprintk("svc: authentication failed (%d)\n", ntohl(auth_stat));
        serv->sv_stats->rpcbadauth++;
        /* Restore write pointer to location of accept status: */
        xdr_ressize_check(rqstp, reply_statp);
        svc_putnl(resv, 1);     /* REJECT */
        svc_putnl(resv, 1);     /* AUTH_ERROR */
        svc_putnl(resv, ntohl(auth_stat));      /* status */
        goto sendit;

err_bad_prog:
        dprintk("svc: unknown program %d\n", prog);
        serv->sv_stats->rpcbadfmt++;
        svc_putnl(resv, RPC_PROG_UNAVAIL);
        goto sendit;

err_bad_vers:
        svc_printk(rqstp, "unknown version (%d for prog %d, %s)\n",
                       vers, prog, progp->pg_name);

        serv->sv_stats->rpcbadfmt++;
        svc_putnl(resv, RPC_PROG_MISMATCH);
        svc_putnl(resv, progp->pg_lovers);
        svc_putnl(resv, progp->pg_hivers);
        goto sendit;

err_bad_proc:
        svc_printk(rqstp, "unknown procedure (%d)\n", proc);

        serv->sv_stats->rpcbadfmt++;
        svc_putnl(resv, RPC_PROC_UNAVAIL);
        goto sendit;

err_garbage:
        svc_printk(rqstp, "failed to decode args\n");

        rpc_stat = rpc_garbage_args;
err_bad:
        serv->sv_stats->rpcbadfmt++;
        svc_putnl(resv, ntohl(rpc_stat));
        goto sendit;
}
EXPORT_SYMBOL_GPL(svc_process);
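
/*
 * Editorial sketch of the data the default path above consumes: a
 * service that leaves vs_dispatch NULL relies on pc_decode/pc_func/
 * pc_encode from its svc_procedure table.  A minimal entry might look
 * like this (all "my_*" names are hypothetical):
 *
 *	static struct svc_procedure my_proc_null = {
 *		.pc_func	= (svc_procfunc) my_null,
 *		.pc_decode	= (kxdrproc_t) my_decode_void,
 *		.pc_encode	= (kxdrproc_t) my_encode_void,
 *		.pc_argsize	= sizeof(struct my_void_args),
 *		.pc_ressize	= sizeof(struct my_void_res),
 *		.pc_xdrressize	= 1,
 *	};
 */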
/*
 * Process the RPC request.
 */
int
svc_process(struct svc_rqst *rqstp)
{
        struct kvec             *argv = &rqstp->rq_arg.head[0];
        struct kvec             *resv = &rqstp->rq_res.head[0];
        struct svc_serv         *serv = rqstp->rq_server;
        u32                     dir;

        /*
         * Setup response xdr_buf.
         * Initially it has just one page
         */
        rqstp->rq_resused = 1;
        resv->iov_base = page_address(rqstp->rq_respages[0]);
        resv->iov_len = 0;
        rqstp->rq_res.pages = rqstp->rq_respages + 1;
        rqstp->rq_res.len = 0;
        rqstp->rq_res.page_base = 0;
        rqstp->rq_res.page_len = 0;
        rqstp->rq_res.buflen = PAGE_SIZE;
        rqstp->rq_res.tail[0].iov_base = NULL;
        rqstp->rq_res.tail[0].iov_len = 0;

        rqstp->rq_xid = svc_getu32(argv);

        dir  = svc_getnl(argv);
        if (dir != 0) {
                /* direction != CALL */
                svc_printk(rqstp, "bad direction %d, dropping request\n", dir);
                serv->sv_stats->rpcbadfmt++;
                svc_drop(rqstp);
                return 0;
        }

        /* Returns 1 for send, 0 for drop */
        if (svc_process_common(rqstp, argv, resv))
                return svc_send(rqstp);
        else {
                svc_drop(rqstp);
                return 0;
        }
}
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
/*
 * Process a backchannel RPC request that arrived over an existing
 * outbound connection
 */
int
bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
               struct svc_rqst *rqstp)
{
        struct kvec     *argv = &rqstp->rq_arg.head[0];
        struct kvec     *resv = &rqstp->rq_res.head[0];

        /* Build the svc_rqst used by the common processing routine */
        rqstp->rq_xprt = serv->sv_bc_xprt;
        rqstp->rq_xid = req->rq_xid;
        rqstp->rq_prot = req->rq_xprt->prot;
        rqstp->rq_server = serv;

        rqstp->rq_addrlen = sizeof(req->rq_xprt->addr);
        memcpy(&rqstp->rq_addr, &req->rq_xprt->addr, rqstp->rq_addrlen);
        memcpy(&rqstp->rq_arg, &req->rq_rcv_buf, sizeof(rqstp->rq_arg));
        memcpy(&rqstp->rq_res, &req->rq_snd_buf, sizeof(rqstp->rq_res));

        /* reset result send buffer "put" position */
        resv->iov_len = 0;

        if (rqstp->rq_prot != IPPROTO_TCP) {
                printk(KERN_ERR "No support for Non-TCP transports!\n");
                BUG();
        }

        /*
         * Skip the next two words because they've already been
         * processed in the transport
         */
        svc_getu32(argv);       /* XID */
        svc_getnl(argv);        /* CALLDIR */

        /* Returns 1 for send, 0 for drop */
        if (svc_process_common(rqstp, argv, resv)) {
                memcpy(&req->rq_snd_buf, &rqstp->rq_res,
                                                sizeof(req->rq_snd_buf));
                return bc_send(req);
        } else {
                /* Nothing to do to drop request */
                return 0;
        }
}
EXPORT_SYMBOL_GPL(bc_svc_process);
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
/*
 * Return (transport-specific) limit on the rpc payload.
 */
u32 svc_max_payload(const struct svc_rqst *rqstp)
{
        u32 max = rqstp->rq_xprt->xpt_class->xcl_max_payload;

        if (rqstp->rq_server->sv_max_payload < max)
                max = rqstp->rq_server->sv_max_payload;
        return max;
}
EXPORT_SYMBOL_GPL(svc_max_payload);
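
/*
 * Example (editorial sketch): NFSv3 read/write handlers clamp the
 * client-requested transfer size with this helper, e.g.
 *
 *	u32 max_blocksize = svc_max_payload(rqstp);
 *
 * so a reply can never exceed the transport's limit or the server's
 * configured sv_max_payload.
 */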