/* SPDX-License-Identifier: GPL-2.0 */
/*
 * linux/include/linux/sunrpc/svc.h
 *
 * RPC server declarations.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#ifndef SUNRPC_SVC_H
#define SUNRPC_SVC_H

#include <linux/in.h>
#include <linux/in6.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/auth.h>
#include <linux/sunrpc/svcauth.h>
#include <linux/wait.h>
#include <linux/mm.h>
/* statistics for svc_pool structures */
struct svc_pool_stats {
	atomic_long_t	packets;
	unsigned long	sockets_queued;
	atomic_long_t	threads_woken;
	atomic_long_t	threads_timedout;
};
/*
 * RPC service thread pool.
 *
 * Pool of threads and temporary sockets.  Generally there is only
 * a single one of these per RPC service, but on NUMA machines those
 * services that can benefit from it (i.e. nfs but not lockd) will
 * have one pool per NUMA node.  This optimisation reduces cross-
 * node traffic on multi-node NUMA NFS servers.
 */
struct svc_pool {
	unsigned int		sp_id;		/* pool id; also node id on NUMA */
	spinlock_t		sp_lock;	/* protects all fields */
	struct list_head	sp_sockets;	/* pending sockets */
	unsigned int		sp_nrthreads;	/* # of threads in pool */
	struct list_head	sp_all_threads;	/* all server threads */
	struct svc_pool_stats	sp_stats;	/* statistics on pool operation */
#define	SP_TASK_PENDING		(0)		/* still work to do even if no
						 * xprt is queued. */
#define SP_CONGESTED		(1)
	unsigned long		sp_flags;
} ____cacheline_aligned_in_smp;
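/*
 * Illustrative sketch, not part of the original header: sp_lock
 * protects all of the fields above, so even a simple query samples the
 * pending-socket list under it.  "svc_pool_has_pending" is a
 * hypothetical helper name, not an existing kernel function.
 */
static inline bool svc_pool_has_pending(struct svc_pool *pool)
{
	bool pending;

	spin_lock_bh(&pool->sp_lock);	/* BH context also queues sockets */
	pending = !list_empty(&pool->sp_sockets);
	spin_unlock_bh(&pool->sp_lock);
	return pending;
}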
struct svc_serv;

struct svc_serv_ops {
	/* Callback to use when last thread exits. */
	void		(*svo_shutdown)(struct svc_serv *, struct net *);

	/* function for service threads to run */
	int		(*svo_function)(void *);

	/* queue up a transport for servicing */
	void		(*svo_enqueue_xprt)(struct svc_xprt *);

	/* set up thread (or whatever) execution context */
	int		(*svo_setup)(struct svc_serv *, struct svc_pool *, int);

	/* optional module to count when adding threads (pooled svcs only) */
	struct module	*svo_module;
};
/*
 * RPC service.
 *
 * An RPC service is a ``daemon,'' possibly multithreaded, which
 * receives and processes incoming RPC messages.
 * It has one or more transport sockets associated with it, and maintains
 * a list of idle threads waiting for input.
 *
 * We currently do not support more than one RPC program per daemon.
 */
struct svc_serv {
	struct svc_program *	sv_program;	/* RPC program */
	struct svc_stat *	sv_stats;	/* RPC statistics */
	spinlock_t		sv_lock;
	unsigned int		sv_nrthreads;	/* # of server threads */
	unsigned int		sv_maxconn;	/* max connections allowed or
						 * '0' causing max to be based
						 * on number of threads. */

	unsigned int		sv_max_payload;	/* datagram payload size */
	unsigned int		sv_max_mesg;	/* max_payload + 1 page for overheads */
	unsigned int		sv_xdrsize;	/* XDR buffer size */
	struct list_head	sv_permsocks;	/* all permanent sockets */
	struct list_head	sv_tempsocks;	/* all temporary sockets */
	int			sv_tmpcnt;	/* count of temporary sockets */
	struct timer_list	sv_temptimer;	/* timer for aging temporary sockets */

	char *			sv_name;	/* service name */

	unsigned int		sv_nrpools;	/* number of thread pools */
	struct svc_pool *	sv_pools;	/* array of thread pools */
	const struct svc_serv_ops *sv_ops;	/* server operations */
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	struct list_head	sv_cb_list;	/* queue for callback requests
						 * that arrive over the same
						 * connection */
	spinlock_t		sv_cb_lock;	/* protects the svc_cb_list */
	wait_queue_head_t	sv_cb_waitq;	/* sleep here if there are no
						 * entries in the svc_cb_list */
	struct svc_xprt		*sv_bc_xprt;	/* callback on fore channel */
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
};
/*
 * We use sv_nrthreads as a reference count.  svc_destroy() drops
 * this refcount, so we need to bump it up around operations that
 * change the number of threads.  Horrible, but there it is.
 * Should be called with the "service mutex" held.
 */
static inline void svc_get(struct svc_serv *serv)
{
	serv->sv_nrthreads++;
}
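/*
 * Illustrative sketch, not part of the original header: the pairing the
 * comment above describes.  Names and ordering are an assumption
 * modelled on nfsd-style callers, not a quote of any caller's code:
 *
 *	mutex_lock(&nfsd_mutex);			(the "service mutex")
 *	svc_get(serv);					(pin via sv_nrthreads)
 *	error = svc_set_num_threads(serv, NULL, nrservs);
 *	svc_destroy(serv);				(drops the reference)
 *	mutex_unlock(&nfsd_mutex);
 */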
/*
 * Maximum payload size supported by a kernel RPC server.
 * This is used to determine the max number of pages nfsd is
 * willing to return in a single READ operation.
 *
 * These happen to all be powers of 2, which is not strictly
 * necessary but helps enforce the real limitation, which is
 * that they should be multiples of PAGE_SIZE.
 *
 * For UDP transports, a block plus NFS, RPC, and UDP headers
 * has to fit into the IP datagram limit of 64K.  The largest
 * feasible number for all known page sizes is probably 48K,
 * but we choose 32K here.  This is the same as the historical
 * Linux limit; someone who cares more about NFS/UDP performance
 * can test a larger number.
 *
 * For TCP transports we have more freedom.  A size of 1MB is
 * chosen to match the client limit.  Other OSes are known to
 * have larger limits, but those numbers are probably beyond
 * the point of diminishing returns.
 */
#define RPCSVC_MAXPAYLOAD	(1*1024*1024u)
#define RPCSVC_MAXPAYLOAD_TCP	RPCSVC_MAXPAYLOAD
#define RPCSVC_MAXPAYLOAD_UDP	(32*1024u)
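/*
 * Worked example, not part of the original header: a 32K UDP payload
 * plus RPC, NFS, and UDP headers (a few hundred bytes) stays well
 * inside the 64K IP datagram limit: 32768 + headers << 65535.  The
 * TCP limit of 1*1024*1024 = 1048576 bytes matches the client-side
 * maximum payload.
 */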
extern u32 svc_max_payload(const struct svc_rqst *rqstp);
/*
 * RPC Requests and replies are stored in one or more pages.
 * We maintain an array of pages for each server thread.
 * Requests are copied into these pages as they arrive.  Remaining
 * pages are available to write the reply into.
 *
 * Pages are sent using ->sendpage so each server thread needs to
 * allocate more to replace those used in sending.  To help keep track
 * of these pages we have a receive list where all pages initially live,
 * and a send list where pages are moved to when they are to be part
 * of a reply.
 *
 * We use xdr_buf for holding responses as it fits well with NFS
 * read responses (that have a header, and some data pages, and possibly
 * a tail) and means we can share some client side routines.
 *
 * The xdr_buf.head kvec always points to the first page in the rq_*pages
 * list.  The xdr_buf.pages pointer points to the second page on that
 * list.  xdr_buf.tail points to the end of the first page.
 * This assumes that the non-page part of an rpc reply will fit
 * in a page - NFSd ensures this.  lockd also has no trouble.
 *
 * Each request/reply pair can have at most one "payload", plus two pages,
 * one for the request, and one for the reply.
 * When using ->sendfile to return read data, we might need one extra page
 * if the request is not page-aligned.  So add another '1'.
 */
#define RPCSVC_MAXPAGES		((RPCSVC_MAXPAYLOAD+PAGE_SIZE-1)/PAGE_SIZE \
				+ 2 + 1)
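/*
 * Worked example, not part of the original header, assuming
 * PAGE_SIZE == 4096:
 *	(1048576 + 4095) / 4096	= 256	payload pages
 *	+ 2				request page + reply page
 *	+ 1				slack for non-page-aligned data
 *	= 259 == RPCSVC_MAXPAGES
 * so the rq_pages[] array declared below holds RPCSVC_MAXPAGES + 1
 * == 260 entries.
 */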
static inline u32 svc_getnl(struct kvec *iov)
{
	__be32 val, *vp;
	vp = iov->iov_base;
	val = *vp++;
	iov->iov_base = (void*)vp;
	iov->iov_len -= sizeof(__be32);
	return ntohl(val);
}
static inline void svc_putnl(struct kvec *iov, u32 val)
{
	__be32 *vp = iov->iov_base + iov->iov_len;
	*vp = htonl(val);
	iov->iov_len += sizeof(__be32);
}
static inline __be32 svc_getu32(struct kvec *iov)
{
	__be32 val, *vp;
	vp = iov->iov_base;
	val = *vp++;
	iov->iov_base = (void*)vp;
	iov->iov_len -= sizeof(__be32);
	return val;
}
static inline void svc_ungetu32(struct kvec *iov)
{
	__be32 *vp = (__be32 *)iov->iov_base;
	iov->iov_base = (void *)(vp - 1);
	iov->iov_len += sizeof(*vp);
}
static inline void svc_putu32(struct kvec *iov, __be32 val)
{
	__be32 *vp = iov->iov_base + iov->iov_len;
	*vp = val;
	iov->iov_len += sizeof(__be32);
}
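/*
 * Illustrative sketch, not part of the original header: how the helpers
 * above pair up.  svc_getnl() consumes one word from the head of the
 * argument kvec; svc_putnl() appends one to the end of the reply kvec.
 * "example_echo_word" is a hypothetical name.
 */
static inline void example_echo_word(struct kvec *argv, struct kvec *resv)
{
	u32 word = svc_getnl(argv);	/* host order after ntohl */
	svc_putnl(resv, word);		/* converted back by htonl */
}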
/*
 * The context of a single thread, including the request currently being
 * processed.
 */
struct svc_rqst {
	struct list_head	rq_all;		/* all threads list */
	struct rcu_head		rq_rcu_head;	/* for RCU deferred kfree */
	struct svc_xprt *	rq_xprt;	/* transport ptr */

	struct sockaddr_storage	rq_addr;	/* peer address */
	size_t			rq_addrlen;
	struct sockaddr_storage	rq_daddr;	/* dest addr of request
						 *  - reply from here */
	size_t			rq_daddrlen;

	struct svc_serv *	rq_server;	/* RPC service definition */
	struct svc_pool *	rq_pool;	/* thread pool */
	const struct svc_procedure *rq_procinfo;/* procedure info */
	struct auth_ops *	rq_authop;	/* authentication flavour */
	struct svc_cred		rq_cred;	/* auth info */
	void *			rq_xprt_ctxt;	/* transport specific context ptr */
	struct svc_deferred_req *rq_deferred;	/* deferred request we are replaying */

	size_t			rq_xprt_hlen;	/* xprt header len */
	struct xdr_buf		rq_arg;
	struct xdr_buf		rq_res;
	struct page		*rq_pages[RPCSVC_MAXPAGES + 1];
	struct page *		*rq_respages;	/* points into rq_pages */
	struct page *		*rq_next_page;	/* next reply page to use */
	struct page *		*rq_page_end;	/* one past the last page */

	struct kvec		rq_vec[RPCSVC_MAXPAGES]; /* generally useful.. */

	__be32			rq_xid;		/* transmission id */
	u32			rq_prog;	/* program number */
	u32			rq_vers;	/* program version */
	u32			rq_proc;	/* procedure number */
	u32			rq_prot;	/* IP protocol */
	int			rq_cachetype;	/* catering to nfsd */
#define	RQ_SECURE	(0)			/* secure port */
#define	RQ_LOCAL	(1)			/* local request */
#define	RQ_USEDEFERRAL	(2)			/* use deferral */
#define	RQ_DROPME	(3)			/* drop current reply */
#define	RQ_SPLICE_OK	(4)			/* turned off in gss privacy
						 * to prevent encrypting page
						 * cache pages */
#define	RQ_VICTIM	(5)			/* about to be shut down */
#define	RQ_BUSY		(6)			/* request is busy */
#define	RQ_DATA		(7)			/* request has data */
	unsigned long		rq_flags;	/* flags field */

	void *			rq_argp;	/* decoded arguments */
	void *			rq_resp;	/* xdr'd results */
	void *			rq_auth_data;	/* flavor-specific data */
	int			rq_auth_slack;	/* extra space xdr code
						 * should leave in head
						 * for krb5i, krb5p.
						 */
	int			rq_reserved;	/* space on socket outq
						 * reserved for this request
						 */

	struct cache_req	rq_chandle;	/* handle passed to caches for
						 * request delaying
						 */
	/* Catering to nfsd */
	struct auth_domain *	rq_client;	/* RPC peer info */
	struct auth_domain *	rq_gssclient;	/* "gss/"-style peer info */
	struct svc_cacherep *	rq_cacherep;	/* cache info */
	struct task_struct	*rq_task;	/* service thread */
	spinlock_t		rq_lock;	/* per-request lock */
};
#define SVC_NET(svc_rqst)	(svc_rqst->rq_xprt->xpt_net)
/*
 * Rigorous type checking on sockaddr type conversions
 */
static inline struct sockaddr_in *svc_addr_in(const struct svc_rqst *rqst)
{
	return (struct sockaddr_in *) &rqst->rq_addr;
}

static inline struct sockaddr_in6 *svc_addr_in6(const struct svc_rqst *rqst)
{
	return (struct sockaddr_in6 *) &rqst->rq_addr;
}

static inline struct sockaddr *svc_addr(const struct svc_rqst *rqst)
{
	return (struct sockaddr *) &rqst->rq_addr;
}

static inline struct sockaddr_in *svc_daddr_in(const struct svc_rqst *rqst)
{
	return (struct sockaddr_in *) &rqst->rq_daddr;
}

static inline struct sockaddr_in6 *svc_daddr_in6(const struct svc_rqst *rqst)
{
	return (struct sockaddr_in6 *) &rqst->rq_daddr;
}

static inline struct sockaddr *svc_daddr(const struct svc_rqst *rqst)
{
	return (struct sockaddr *) &rqst->rq_daddr;
}
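/*
 * Illustrative sketch, not part of the original header: the typed
 * accessors above let callers branch on the address family without
 * open-coded casts.  "example_peer_port" is a hypothetical name.
 */
static inline unsigned short example_peer_port(const struct svc_rqst *rqst)
{
	switch (svc_addr(rqst)->sa_family) {
	case AF_INET:
		return ntohs(svc_addr_in(rqst)->sin_port);
	case AF_INET6:
		return ntohs(svc_addr_in6(rqst)->sin6_port);
	default:
		return 0;
	}
}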
/*
 * Check buffer bounds after decoding arguments
 */
static inline int
xdr_argsize_check(struct svc_rqst *rqstp, __be32 *p)
{
	char *cp = (char *)p;
	struct kvec *vec = &rqstp->rq_arg.head[0];
	return cp >= (char*)vec->iov_base
		&& cp <= (char*)vec->iov_base + vec->iov_len;
}
static inline int
xdr_ressize_check(struct svc_rqst *rqstp, __be32 *p)
{
	struct kvec *vec = &rqstp->rq_res.head[0];
	char *cp = (char*)p;

	vec->iov_len = cp - (char*)vec->iov_base;

	return vec->iov_len <= PAGE_SIZE;
}
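/*
 * Illustrative sketch, not part of the original header: an XDR encode
 * routine writes its reply words and then calls xdr_ressize_check(),
 * which both sets rq_res.head's iov_len and bounds it to one page.
 * "example_encode_void" is a hypothetical name for a procedure with an
 * empty result.
 */
static inline int example_encode_void(struct svc_rqst *rqstp, __be32 *p)
{
	/* nothing encoded: p still points just past the RPC header */
	return xdr_ressize_check(rqstp, p);
}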
static inline void svc_free_res_pages(struct svc_rqst *rqstp)
{
	while (rqstp->rq_next_page != rqstp->rq_respages) {
		struct page **pp = --rqstp->rq_next_page;
		if (*pp) {
			put_page(*pp);
			*pp = NULL;
		}
	}
}
struct svc_deferred_req {
	u32			prot;	/* protocol (UDP or TCP) */
	struct svc_xprt		*xprt;
	struct sockaddr_storage	addr;	/* where reply must go */
	size_t			addrlen;
	struct sockaddr_storage	daddr;	/* where reply must come from */
	size_t			daddrlen;
	struct cache_deferred_req handle;
	size_t			xprt_hlen;
	int			argslen;
	__be32			args[0];
};
/*
 * List of RPC programs on the same transport endpoint
 */
struct svc_program {
	struct svc_program *	pg_next;	/* other programs (same xprt) */
	u32			pg_prog;	/* program number */
	unsigned int		pg_lovers;	/* lowest version */
	unsigned int		pg_hivers;	/* highest version */
	unsigned int		pg_nvers;	/* number of versions */
	const struct svc_version **pg_vers;	/* version array */
	char *			pg_name;	/* service name */
	char *			pg_class;	/* class name: services sharing authentication */
	struct svc_stat *	pg_stats;	/* rpc statistics */
	int			(*pg_authenticate)(struct svc_rqst *);
};
/*
 * RPC program version
 */
struct svc_version {
	u32			vs_vers;	/* version number */
	u32			vs_nproc;	/* number of procedures */
	const struct svc_procedure *vs_proc;	/* per-procedure info */
	unsigned int		*vs_count;	/* call counts */
	u32			vs_xdrsize;	/* xdrsize needed for this version */

	/* Don't register with rpcbind */
	bool			vs_hidden;

	/* Don't care if the rpcbind registration fails */
	bool			vs_rpcb_optnl;

	/* Need xprt with congestion control */
	bool			vs_need_cong_ctrl;

	/* Override dispatch function (e.g. when caching replies).
	 * A return value of 0 means drop the request.
	 * vs_dispatch == NULL means use default dispatcher.
	 */
	int			(*vs_dispatch)(struct svc_rqst *, __be32 *);
};
/*
 * RPC procedure info
 */
struct svc_procedure {
	/* process the request: */
	__be32			(*pc_func)(struct svc_rqst *);
	/* XDR decode args: */
	int			(*pc_decode)(struct svc_rqst *, __be32 *data);
	/* XDR encode result: */
	int			(*pc_encode)(struct svc_rqst *, __be32 *data);
	/* XDR free result: */
	void			(*pc_release)(struct svc_rqst *);
	unsigned int		pc_argsize;	/* argument struct size */
	unsigned int		pc_ressize;	/* result struct size */
	unsigned int		pc_cachetype;	/* cache info (NFS) */
	unsigned int		pc_xdrressize;	/* maximum size of XDR reply */
};
/*
 * Mode for mapping cpus to pools.
 */
enum {
	SVC_POOL_AUTO = -1,	/* choose one of the others */
	SVC_POOL_GLOBAL,	/* no mapping, just a single global pool
				 * (legacy & UP mode) */
	SVC_POOL_PERCPU,	/* one pool per cpu */
	SVC_POOL_PERNODE	/* one pool per numa node */
};
struct svc_pool_map {
	int count;			/* How many svc_servs use us */
	int mode;			/* Note: int not enum to avoid
					 * warnings about "enumeration value
					 * not handled in switch" */
	unsigned int npools;
	unsigned int *pool_to;		/* maps pool id to cpu or node */
	unsigned int *to_pool;		/* maps cpu or node to pool id */
};

extern struct svc_pool_map svc_pool_map;
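/*
 * Illustrative note, not part of the original header: pool_to[] and
 * to_pool[] are inverse maps.  In SVC_POOL_PERCPU mode, for example,
 *	to_pool[cpu]  == id of the pool serving that cpu
 *	pool_to[pool] == cpu owning that pool
 * so pool_to[to_pool[cpu]] == cpu; SVC_POOL_PERNODE works the same
 * way with NUMA node ids in place of cpu ids.
 */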
/*
 * Function prototypes.
 */
int svc_rpcb_setup(struct svc_serv *serv, struct net *net);
void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net);
int svc_bind(struct svc_serv *serv, struct net *net);
struct svc_serv *svc_create(struct svc_program *, unsigned int,
			    const struct svc_serv_ops *);
struct svc_rqst *svc_rqst_alloc(struct svc_serv *serv,
				struct svc_pool *pool, int node);
struct svc_rqst *svc_prepare_thread(struct svc_serv *serv,
				struct svc_pool *pool, int node);
void		   svc_rqst_free(struct svc_rqst *);
void		   svc_exit_thread(struct svc_rqst *);
unsigned int	   svc_pool_map_get(void);
void		   svc_pool_map_put(void);
struct svc_serv *  svc_create_pooled(struct svc_program *, unsigned int,
			const struct svc_serv_ops *);
int		   svc_set_num_threads(struct svc_serv *, struct svc_pool *, int);
int		   svc_set_num_threads_sync(struct svc_serv *, struct svc_pool *, int);
int		   svc_pool_stats_open(struct svc_serv *serv, struct file *file);
void		   svc_destroy(struct svc_serv *);
void		   svc_shutdown_net(struct svc_serv *, struct net *);
int		   svc_process(struct svc_rqst *);
int		   bc_svc_process(struct svc_serv *, struct rpc_rqst *,
			struct svc_rqst *);
int		   svc_register(const struct svc_serv *, struct net *, const int,
			const unsigned short, const unsigned short);

void		   svc_wake_up(struct svc_serv *);
void		   svc_reserve(struct svc_rqst *rqstp, int space);
struct svc_pool *  svc_pool_for_cpu(struct svc_serv *serv, int cpu);
char *		   svc_print_addr(struct svc_rqst *, char *, size_t);
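/*
 * Illustrative sketch, not part of the original header: the lifecycle
 * the prototypes above imply.  "example_start" is a hypothetical
 * helper; passing a NULL pool to svc_set_num_threads() spreads the
 * threads across all pools, and real callers (lockd, nfsd, ...) add
 * their own locking and error reporting.
 */
static inline struct svc_serv *example_start(struct svc_program *prog,
					     const struct svc_serv_ops *ops,
					     struct net *net, int nrservs)
{
	struct svc_serv *serv;

	serv = svc_create_pooled(prog, RPCSVC_MAXPAYLOAD_TCP, ops);
	if (!serv)
		return NULL;
	/* advertise with rpcbind, then spawn nrservs threads */
	if (svc_bind(serv, net) < 0 ||
	    svc_set_num_threads(serv, NULL, nrservs) < 0) {
		svc_destroy(serv);
		return NULL;
	}
	return serv;
}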
#define	RPC_MAX_ADDRBUFLEN	(63U)
/*
 * When we want to reduce the size of the reserved space in the response
 * buffer, we need to take into account the size of any checksum data that
 * may be at the end of the packet. This is difficult to determine exactly
 * for all cases without actually generating the checksum, so we just use a
 * static value.
 */
static inline void svc_reserve_auth(struct svc_rqst *rqstp, int space)
{
	svc_reserve(rqstp, space + rqstp->rq_auth_slack);
}
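/*
 * Illustrative note, not part of the original header: a sized reply
 * reservation would be written as
 *	svc_reserve_auth(rqstp, resp_hdr_size + payload_size);
 * where rq_auth_slack covers a trailing krb5i/krb5p checksum whose
 * exact size is not known until the reply is actually wrapped.
 * "resp_hdr_size" and "payload_size" are hypothetical names.
 */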
#endif /* SUNRPC_SVC_H */