// SPDX-License-Identifier: GPL-2.0
/*
 * Central processing for nfsd.
 *
 * Authors:	Olaf Kirch (okir@monad.swb.de)
 *
 * Copyright (C) 1995, 1996, 1997 Olaf Kirch <okir@monad.swb.de>
 */
#include <linux/sched/signal.h>
#include <linux/freezer.h>
#include <linux/module.h>
#include <linux/fs_struct.h>
#include <linux/swap.h>
#include <linux/siphash.h>

#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/lockd/bind.h>
#include <linux/nfsacl.h>
#include <linux/nfslocalio.h>
#include <linux/seq_file.h>
#include <linux/inetdevice.h>
#include <net/addrconf.h>
#include <net/net_namespace.h>
#include "nfsd.h"
#include "cache.h"
#include "vfs.h"
#include "netns.h"
#include "filecache.h"
#include "trace.h"
#define NFSDDBG_FACILITY	NFSDDBG_SVC

atomic_t			nfsd_th_cnt = ATOMIC_INIT(0);
static int			nfsd(void *vrqstp);
#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
static int	nfsd_acl_rpcbind_set(struct net *,
				     const struct svc_program *,
				     u32, int,
				     unsigned short,
				     unsigned short);
static __be32	nfsd_acl_init_request(struct svc_rqst *,
				      const struct svc_program *,
				      struct svc_process_info *);
#endif
static int	nfsd_rpcbind_set(struct net *,
				 const struct svc_program *,
				 u32, int,
				 unsigned short,
				 unsigned short);
static __be32	nfsd_init_request(struct svc_rqst *,
				  const struct svc_program *,
				  struct svc_process_info *);
/*
 * nfsd_mutex protects nn->nfsd_serv -- both the pointer itself and some members
 * of the svc_serv struct such as ->sv_temp_socks and ->sv_permsocks.
 *
 * The nfsd_mutex also protects some of the global variables that are accessed
 * when nfsd starts and that are settable via the write_* routines in nfsctl.c.
 * In particular:
 *
 *	user_recovery_dirname
 */
DEFINE_MUTEX(nfsd_mutex);
/*
 * nfsd_drc_lock protects nfsd_drc_max_mem and nfsd_drc_mem_used.
 * nfsd_drc_max_mem limits the total amount of memory available for
 * version 4.1 DRC caches.
 * nfsd_drc_mem_used tracks the current version 4.1 DRC memory usage.
 */
DEFINE_SPINLOCK(nfsd_drc_lock);
unsigned long	nfsd_drc_max_mem;
unsigned long	nfsd_drc_mem_used;
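
/*
 * Illustrative sketch (not part of this file): the NFSv4.1 session code
 * reserves DRC memory under nfsd_drc_lock roughly like this, assuming a
 * caller-computed "wanted" byte count:
 *
 *	spin_lock(&nfsd_drc_lock);
 *	avail = min_t(unsigned long, wanted,
 *		      nfsd_drc_max_mem - nfsd_drc_mem_used);
 *	nfsd_drc_mem_used += avail;
 *	spin_unlock(&nfsd_drc_lock);
 */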
#if IS_ENABLED(CONFIG_NFS_LOCALIO)
static const struct svc_version *localio_versions[] = {
	[1] = &localio_version1,
};

#define NFSD_LOCALIO_NRVERS	ARRAY_SIZE(localio_versions)

#endif /* CONFIG_NFS_LOCALIO */
#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
static const struct svc_version *nfsd_acl_version[] = {
# if defined(CONFIG_NFSD_V2_ACL)
	[2] = &nfsd_acl_version2,
# endif
# if defined(CONFIG_NFSD_V3_ACL)
	[3] = &nfsd_acl_version3,
# endif
};

#define NFSD_ACL_MINVERS	2
#define NFSD_ACL_NRVERS		ARRAY_SIZE(nfsd_acl_version)

#endif /* defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) */
static const struct svc_version *nfsd_version[NFSD_MAXVERS+1] = {
#if defined(CONFIG_NFSD_V2)
	[2] = &nfsd_version2,
#endif
	[3] = &nfsd_version3,
#if defined(CONFIG_NFSD_V4)
	[4] = &nfsd_version4,
#endif
};
struct svc_program		nfsd_programs[] = {
	{
	.pg_prog		= NFS_PROGRAM,		/* program number */
	.pg_nvers		= NFSD_MAXVERS+1,	/* nr of entries in nfsd_version */
	.pg_vers		= nfsd_version,		/* version table */
	.pg_name		= "nfsd",		/* program name */
	.pg_class		= "nfsd",		/* authentication class */
	.pg_authenticate	= svc_set_client,	/* export authentication */
	.pg_init_request	= nfsd_init_request,
	.pg_rpcbind_set		= nfsd_rpcbind_set,
	},
#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
	{
	.pg_prog		= NFS_ACL_PROGRAM,
	.pg_nvers		= NFSD_ACL_NRVERS,
	.pg_vers		= nfsd_acl_version,
	.pg_name		= "nfsacl",
	.pg_class		= "nfsd",
	.pg_authenticate	= svc_set_client,
	.pg_init_request	= nfsd_acl_init_request,
	.pg_rpcbind_set		= nfsd_acl_rpcbind_set,
	},
#endif /* defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) */
#if IS_ENABLED(CONFIG_NFS_LOCALIO)
	{
	.pg_prog		= NFS_LOCALIO_PROGRAM,
	.pg_nvers		= NFSD_LOCALIO_NRVERS,
	.pg_vers		= localio_versions,
	.pg_name		= "nfslocalio",
	.pg_class		= "nfsd",
	.pg_authenticate	= svc_set_client,
	.pg_init_request	= svc_generic_init_request,
	.pg_rpcbind_set		= svc_generic_rpcbind_set,
	},
#endif /* CONFIG_NFS_LOCALIO */
};
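
/*
 * Note: nfsd_create_serv() below hands this table to svc_create_pooled(),
 * so every entry becomes an RPC program that the same set of nfsd threads
 * and transports answers.
 */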
bool nfsd_support_version(int vers)
{
	if (vers >= NFSD_MINVERS && vers <= NFSD_MAXVERS)
		return nfsd_version[vers] != NULL;
	return false;
}
int nfsd_vers(struct nfsd_net *nn, int vers, enum vers_op change)
{
	if (vers < NFSD_MINVERS || vers > NFSD_MAXVERS)
		return 0;
	switch(change) {
	case NFSD_SET:
		nn->nfsd_versions[vers] = nfsd_support_version(vers);
		break;
	case NFSD_CLEAR:
		nn->nfsd_versions[vers] = false;
		break;
	case NFSD_TEST:
		return nn->nfsd_versions[vers];
	case NFSD_AVAIL:
		return nfsd_support_version(vers);
	}
	return 0;
}
static void
nfsd_adjust_nfsd_versions4(struct nfsd_net *nn)
{
	int i;

	for (i = 0; i <= NFSD_SUPPORTED_MINOR_VERSION; i++) {
		if (nn->nfsd4_minorversions[i])
			return;
	}
	nfsd_vers(nn, 4, NFSD_CLEAR);
}
int nfsd_minorversion(struct nfsd_net *nn, u32 minorversion, enum vers_op change)
{
	if (minorversion > NFSD_SUPPORTED_MINOR_VERSION &&
	    change != NFSD_AVAIL)
		return -1;

	switch(change) {
	case NFSD_SET:
		nfsd_vers(nn, 4, NFSD_SET);
		nn->nfsd4_minorversions[minorversion] =
			nfsd_vers(nn, 4, NFSD_TEST);
		break;
	case NFSD_CLEAR:
		nn->nfsd4_minorversions[minorversion] = false;
		nfsd_adjust_nfsd_versions4(nn);
		break;
	case NFSD_TEST:
		return nn->nfsd4_minorversions[minorversion];
	case NFSD_AVAIL:
		return minorversion <= NFSD_SUPPORTED_MINOR_VERSION &&
			nfsd_vers(nn, 4, NFSD_AVAIL);
	}
	return 0;
}
bool nfsd_serv_try_get(struct net *net) __must_hold(rcu)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	return (nn && percpu_ref_tryget_live(&nn->nfsd_serv_ref));
}
void nfsd_serv_put(struct net *net) __must_hold(rcu)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	percpu_ref_put(&nn->nfsd_serv_ref);
}
static void nfsd_serv_done(struct percpu_ref *ref)
{
	struct nfsd_net *nn = container_of(ref, struct nfsd_net, nfsd_serv_ref);

	complete(&nn->nfsd_serv_confirm_done);
}

static void nfsd_serv_free(struct percpu_ref *ref)
{
	struct nfsd_net *nn = container_of(ref, struct nfsd_net, nfsd_serv_ref);

	complete(&nn->nfsd_serv_free_done);
}
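
/*
 * These two callbacks drive the two-stage teardown of the per-net svc_serv
 * reference: nfsd_destroy_serv() below calls percpu_ref_kill_and_confirm()
 * with nfsd_serv_done and then waits for nfsd_serv_confirm_done, followed
 * by nfsd_serv_free_done once the last reference has been dropped.
 */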
/*
 * Maximum number of nfsd processes
 */
#define	NFSD_MAXSERVS		8192
int nfsd_nrthreads(struct net *net)
{
	int rv = 0;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	mutex_lock(&nfsd_mutex);
	if (nn->nfsd_serv)
		rv = nn->nfsd_serv->sv_nrthreads;
	mutex_unlock(&nfsd_mutex);
	return rv;
}
static int nfsd_init_socks(struct net *net, const struct cred *cred)
{
	int error;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	if (!list_empty(&nn->nfsd_serv->sv_permsocks))
		return 0;

	error = svc_xprt_create(nn->nfsd_serv, "udp", net, PF_INET, NFS_PORT,
				SVC_SOCK_DEFAULTS, cred);
	if (error < 0)
		return error;

	error = svc_xprt_create(nn->nfsd_serv, "tcp", net, PF_INET, NFS_PORT,
				SVC_SOCK_DEFAULTS, cred);
	if (error < 0)
		return error;

	return 0;
}
static int nfsd_users = 0;
static int nfsd_startup_generic(void)
{
	int ret;

	if (nfsd_users++)
		return 0;

	ret = nfsd_file_cache_init();
	if (ret)
		goto dec_users;

	ret = nfs4_state_start();
	if (ret)
		goto out_file_cache;
	return 0;

out_file_cache:
	nfsd_file_cache_shutdown();
dec_users:
	nfsd_users--;
	return ret;
}
static void nfsd_shutdown_generic(void)
{
	if (--nfsd_users)
		return;

	nfs4_state_shutdown();
	nfsd_file_cache_shutdown();
}
static bool nfsd_needs_lockd(struct nfsd_net *nn)
{
	return nfsd_vers(nn, 2, NFSD_TEST) || nfsd_vers(nn, 3, NFSD_TEST);
}
/**
 * nfsd_copy_write_verifier - Atomically copy a write verifier
 * @verf: buffer in which to receive the verifier cookie
 * @nn: NFS net namespace
 *
 * This function provides a wait-free mechanism for copying the
 * namespace's write verifier without tearing it.
 */
void nfsd_copy_write_verifier(__be32 verf[2], struct nfsd_net *nn)
{
	unsigned int seq;

	do {
		seq = read_seqbegin(&nn->writeverf_lock);
		memcpy(verf, nn->writeverf, sizeof(nn->writeverf));
	} while (read_seqretry(&nn->writeverf_lock, seq));
}
static void nfsd_reset_write_verifier_locked(struct nfsd_net *nn)
{
	struct timespec64 now;
	u64 verf;

	/*
	 * Because the time value is hashed, y2038 time_t overflow
	 * is irrelevant in this usage.
	 */
	ktime_get_raw_ts64(&now);
	verf = siphash_2u64(now.tv_sec, now.tv_nsec, &nn->siphash_key);
	memcpy(nn->writeverf, &verf, sizeof(nn->writeverf));
}
/**
 * nfsd_reset_write_verifier - Generate a new write verifier
 * @nn: NFS net namespace
 *
 * This function updates the ->writeverf field of @nn. This field
 * contains an opaque cookie that, according to Section 18.32.3 of
 * RFC 8881, "the client can use to determine whether a server has
 * changed instance state (e.g., server restart) between a call to
 * WRITE and a subsequent call to either WRITE or COMMIT.  This
 * cookie MUST be unchanged during a single instance of the NFSv4.1
 * server and MUST be unique between instances of the NFSv4.1
 * server."
 */
void nfsd_reset_write_verifier(struct nfsd_net *nn)
{
	write_seqlock(&nn->writeverf_lock);
	nfsd_reset_write_verifier_locked(nn);
	write_sequnlock(&nn->writeverf_lock);
}
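
/*
 * Reader/writer pairing, as an illustrative sketch only: the writer above
 * updates the cookie under write_seqlock(), while nfsd_copy_write_verifier()
 * retries its memcpy() until read_seqretry() reports that no update raced
 * with it. A reply encoder that needs the current verifier would simply do:
 *
 *	__be32 verf[2];
 *
 *	nfsd_copy_write_verifier(verf, nn);
 */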
/*
 * Crank up a set of per-namespace resources for a new NFSD instance,
 * including lockd, a duplicate reply cache, an open file cache
 * instance, and a cache of NFSv4 state objects.
 */
static int nfsd_startup_net(struct net *net, const struct cred *cred)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	int ret;

	if (nn->nfsd_net_up)
		return 0;

	ret = nfsd_startup_generic();
	if (ret)
		return ret;
	ret = nfsd_init_socks(net, cred);
	if (ret)
		goto out_socks;

	if (nfsd_needs_lockd(nn) && !nn->lockd_up) {
		ret = lockd_up(net, cred);
		if (ret)
			goto out_socks;
		nn->lockd_up = true;
	}

	ret = nfsd_file_cache_start_net(net);
	if (ret)
		goto out_lockd;

	ret = nfsd_reply_cache_init(nn);
	if (ret)
		goto out_filecache;

	ret = nfs4_state_start_net(net);
	if (ret)
		goto out_reply_cache;

#ifdef CONFIG_NFSD_V4_2_INTER_SSC
	nfsd4_ssc_init_umount_work(nn);
#endif
	nn->nfsd_net_up = true;
	return 0;

out_reply_cache:
	nfsd_reply_cache_shutdown(nn);
out_filecache:
	nfsd_file_cache_shutdown_net(net);
out_lockd:
	if (nn->lockd_up) {
		lockd_down(net);
		nn->lockd_up = false;
	}
out_socks:
	nfsd_shutdown_generic();
	return ret;
}
static void nfsd_shutdown_net(struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	if (!nn->nfsd_net_up)
		return;

	nfsd_export_flush(net);
	nfs4_state_shutdown_net(net);
	nfsd_reply_cache_shutdown(nn);
	nfsd_file_cache_shutdown_net(net);
	if (nn->lockd_up) {
		lockd_down(net);
		nn->lockd_up = false;
	}
	percpu_ref_exit(&nn->nfsd_serv_ref);
	nn->nfsd_net_up = false;
	nfsd_shutdown_generic();
}
static DEFINE_SPINLOCK(nfsd_notifier_lock);
static int nfsd_inetaddr_event(struct notifier_block *this, unsigned long event,
			       void *ptr)
{
	struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
	struct net_device *dev = ifa->ifa_dev->dev;
	struct net *net = dev_net(dev);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	struct sockaddr_in sin;

	if (event != NETDEV_DOWN || !nn->nfsd_serv)
		goto out;

	spin_lock(&nfsd_notifier_lock);
	if (nn->nfsd_serv) {
		dprintk("nfsd_inetaddr_event: removed %pI4\n", &ifa->ifa_local);
		sin.sin_family = AF_INET;
		sin.sin_addr.s_addr = ifa->ifa_local;
		svc_age_temp_xprts_now(nn->nfsd_serv, (struct sockaddr *)&sin);
	}
	spin_unlock(&nfsd_notifier_lock);

out:
	return NOTIFY_DONE;
}
static struct notifier_block nfsd_inetaddr_notifier = {
	.notifier_call = nfsd_inetaddr_event,
};
#if IS_ENABLED(CONFIG_IPV6)
static int nfsd_inet6addr_event(struct notifier_block *this,
				unsigned long event, void *ptr)
{
	struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
	struct net_device *dev = ifa->idev->dev;
	struct net *net = dev_net(dev);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	struct sockaddr_in6 sin6;

	if (event != NETDEV_DOWN || !nn->nfsd_serv)
		goto out;

	spin_lock(&nfsd_notifier_lock);
	if (nn->nfsd_serv) {
		dprintk("nfsd_inet6addr_event: removed %pI6\n", &ifa->addr);
		sin6.sin6_family = AF_INET6;
		sin6.sin6_addr = ifa->addr;
		if (ipv6_addr_type(&sin6.sin6_addr) & IPV6_ADDR_LINKLOCAL)
			sin6.sin6_scope_id = ifa->idev->dev->ifindex;
		svc_age_temp_xprts_now(nn->nfsd_serv, (struct sockaddr *)&sin6);
	}
	spin_unlock(&nfsd_notifier_lock);

out:
	return NOTIFY_DONE;
}
static struct notifier_block nfsd_inet6addr_notifier = {
	.notifier_call = nfsd_inet6addr_event,
};
#endif /* IS_ENABLED(CONFIG_IPV6) */
/* Only used under nfsd_mutex, so this atomic may be overkill: */
static atomic_t nfsd_notifier_refcount = ATOMIC_INIT(0);
/**
 * nfsd_destroy_serv - tear down NFSD's svc_serv for a namespace
 * @net: network namespace the NFS service is associated with
 */
void nfsd_destroy_serv(struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	struct svc_serv *serv = nn->nfsd_serv;

	lockdep_assert_held(&nfsd_mutex);

	percpu_ref_kill_and_confirm(&nn->nfsd_serv_ref, nfsd_serv_done);
	wait_for_completion(&nn->nfsd_serv_confirm_done);
	wait_for_completion(&nn->nfsd_serv_free_done);
	/* percpu_ref_exit is called in nfsd_shutdown_net */

	spin_lock(&nfsd_notifier_lock);
	nn->nfsd_serv = NULL;
	spin_unlock(&nfsd_notifier_lock);

	/* check if the notifier still has clients */
	if (atomic_dec_return(&nfsd_notifier_refcount) == 0) {
		unregister_inetaddr_notifier(&nfsd_inetaddr_notifier);
#if IS_ENABLED(CONFIG_IPV6)
		unregister_inet6addr_notifier(&nfsd_inet6addr_notifier);
#endif
	}

	svc_xprt_destroy_all(serv, net);

	/*
	 * write_ports can create the server without actually starting
	 * any threads--if we get shut down before any threads are
	 * started, then nfsd_destroy_serv will be run before any of this
	 * other initialization has been done except the rpcb information.
	 */
	svc_rpcb_cleanup(serv, net);

	nfsd_shutdown_net(net);
	svc_destroy(&serv);
}
void nfsd_reset_versions(struct nfsd_net *nn)
{
	int i;

	for (i = 0; i <= NFSD_MAXVERS; i++)
		if (nfsd_vers(nn, i, NFSD_TEST))
			return;

	for (i = 0; i <= NFSD_MAXVERS; i++)
		if (i != 4)
			nfsd_vers(nn, i, NFSD_SET);
		else {
			int minor = 0;

			while (nfsd_minorversion(nn, minor, NFSD_SET) >= 0)
				minor++;
		}
}
/*
 * Each session guarantees a negotiated per slot memory cache for replies
 * which in turn consumes memory beyond the v2/v3/v4.0 server. A dedicated
 * NFSv4.1 server might want to use more memory for a DRC than a machine
 * with multiple services.
 *
 * Impose a hard limit on the number of pages for the DRC which varies
 * according to the machine's free pages. This is of course only a default.
 *
 * For now this is a #defined shift which could be under admin control
 * in the future.
 */
static void set_max_drc(void)
{
	#define NFSD_DRC_SIZE_SHIFT	7
	nfsd_drc_max_mem = (nr_free_buffer_pages()
					>> NFSD_DRC_SIZE_SHIFT) * PAGE_SIZE;
	nfsd_drc_mem_used = 0;
	dprintk("%s nfsd_drc_max_mem %lu\n", __func__, nfsd_drc_max_mem);
}
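
/*
 * Worked example: NFSD_DRC_SIZE_SHIFT of 7 caps the DRC at 1/128 of the
 * free buffer pages, so a machine with 1 GiB of free buffer memory gets a
 * default nfsd_drc_max_mem of 8 MiB.
 */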
static int nfsd_get_default_max_blksize(void)
{
	struct sysinfo i;
	unsigned long long target;
	unsigned long ret;

	si_meminfo(&i);
	target = (i.totalram - i.totalhigh) << PAGE_SHIFT;
	/*
	 * Aim for 1/4096 of memory per thread. This gives 1MB on 4Gig
	 * machines, but only uses 32K on 128M machines. Bottom out at
	 * 8K on 32M and smaller. Of course, this is only a default.
	 */
	target >>= 12;

	ret = NFSSVC_MAXBLKSIZE;
	while (ret > target && ret >= 8*1024*2)
		ret /= 2;
	return ret;
}
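
/*
 * Worked example of the sizing above: with 128MB of low memory, target is
 * 128MB >> 12 = 32KB, so the loop halves the default from NFSSVC_MAXBLKSIZE
 * down to 32KB; the "ret >= 8*1024*2" test keeps the result from dropping
 * below 8KB.
 */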
void nfsd_shutdown_threads(struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	struct svc_serv *serv;

	mutex_lock(&nfsd_mutex);
	serv = nn->nfsd_serv;
	if (serv == NULL) {
		mutex_unlock(&nfsd_mutex);
		return;
	}

	/* Kill outstanding nfsd threads */
	svc_set_num_threads(serv, NULL, 0);
	nfsd_destroy_serv(net);
	mutex_unlock(&nfsd_mutex);
}
struct svc_rqst *nfsd_current_rqst(void)
{
	if (kthread_func(current) == nfsd)
		return kthread_data(current);
	return NULL;
}
int nfsd_create_serv(struct net *net)
{
	int error;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	struct svc_serv *serv;

	WARN_ON(!mutex_is_locked(&nfsd_mutex));
	if (nn->nfsd_serv)
		return 0;

	error = percpu_ref_init(&nn->nfsd_serv_ref, nfsd_serv_free,
				0, GFP_KERNEL);
	if (error)
		return error;
	init_completion(&nn->nfsd_serv_free_done);
	init_completion(&nn->nfsd_serv_confirm_done);

	if (nfsd_max_blksize == 0)
		nfsd_max_blksize = nfsd_get_default_max_blksize();
	nfsd_reset_versions(nn);
	serv = svc_create_pooled(nfsd_programs, ARRAY_SIZE(nfsd_programs),
				 &nn->nfsd_svcstats,
				 nfsd_max_blksize, nfsd);
	if (serv == NULL)
		return -ENOMEM;

	serv->sv_maxconn = nn->max_connections;
	error = svc_bind(serv, net);
	if (error < 0) {
		svc_destroy(&serv);
		return error;
	}
	spin_lock(&nfsd_notifier_lock);
	nn->nfsd_serv = serv;
	spin_unlock(&nfsd_notifier_lock);

	set_max_drc();
	/* check if the notifier is already set */
	if (atomic_inc_return(&nfsd_notifier_refcount) == 1) {
		register_inetaddr_notifier(&nfsd_inetaddr_notifier);
#if IS_ENABLED(CONFIG_IPV6)
		register_inet6addr_notifier(&nfsd_inet6addr_notifier);
#endif
	}
	nfsd_reset_write_verifier(nn);
	return 0;
}
int nfsd_nrpools(struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	if (nn->nfsd_serv == NULL)
		return 0;
	else
		return nn->nfsd_serv->sv_nrpools;
}
int nfsd_get_nrthreads(int n, int *nthreads, struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	struct svc_serv *serv = nn->nfsd_serv;
	int i;

	if (serv)
		for (i = 0; i < serv->sv_nrpools && i < n; i++)
			nthreads[i] = serv->sv_pools[i].sp_nrthreads;
	return 0;
}
/**
 * nfsd_set_nrthreads - set the number of running threads in the net's service
 * @n: number of array members in @nthreads
 * @nthreads: array of thread counts for each pool
 * @net: network namespace to operate within
 *
 * This function alters the number of running threads for the given network
 * namespace in each pool. If passed an array longer than the number of pools,
 * the extra pool settings are ignored. If passed an array shorter than the
 * number of pools, the missing values are interpreted as 0's.
 *
 * Returns 0 on success or a negative errno on error.
 */
int nfsd_set_nrthreads(int n, int *nthreads, struct net *net)
{
	int i = 0;
	int tot = 0;
	int err = 0;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	lockdep_assert_held(&nfsd_mutex);

	if (nn->nfsd_serv == NULL || n <= 0)
		return 0;

	/*
	 * Special case: When n == 1, pass in NULL for the pool, so that the
	 * change is distributed equally among all pools.
	 */
	if (n == 1)
		return svc_set_num_threads(nn->nfsd_serv, NULL, nthreads[0]);

	if (n > nn->nfsd_serv->sv_nrpools)
		n = nn->nfsd_serv->sv_nrpools;

	/* enforce a global maximum number of threads */
	for (i = 0; i < n; i++) {
		nthreads[i] = min(nthreads[i], NFSD_MAXSERVS);
		tot += nthreads[i];
	}
	if (tot > NFSD_MAXSERVS) {
		/* total too large: scale down requested numbers */
		for (i = 0; i < n && tot > 0; i++) {
			int new = nthreads[i] * NFSD_MAXSERVS / tot;
			tot -= (nthreads[i] - new);
			nthreads[i] = new;
		}
		for (i = 0; i < n && tot > 0; i++) {
			nthreads[i]--;
			tot--;
		}
	}

	/* apply the new numbers */
	for (i = 0; i < n; i++) {
		err = svc_set_num_threads(nn->nfsd_serv,
					  &nn->nfsd_serv->sv_pools[i],
					  nthreads[i]);
		if (err)
			goto out;
	}

	/* Anything undefined in array is considered to be 0 */
	for (i = n; i < nn->nfsd_serv->sv_nrpools; ++i) {
		err = svc_set_num_threads(nn->nfsd_serv,
					  &nn->nfsd_serv->sv_pools[i],
					  0);
		if (err)
			goto out;
	}

out:
	return err;
}
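
/*
 * Worked example of the scale-down above: with NFSD_MAXSERVS at 8192, two
 * pools requesting 6000 threads each (12000 total) end up with roughly
 * 4096 and 4868 threads, because tot shrinks as the loop walks the pools.
 */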
/**
 * nfsd_svc - start up or shut down the nfsd server
 * @n: number of array members in @nthreads
 * @nthreads: array of thread counts for each pool
 * @net: network namespace to operate within
 * @cred: credentials to use for xprt creation
 * @scope: server scope value (defaults to nodename)
 *
 * Adjust the number of threads in each pool and return the new
 * total number of threads in the service.
 */
int
nfsd_svc(int n, int *nthreads, struct net *net, const struct cred *cred, const char *scope)
{
	int	error;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	struct svc_serv *serv;

	lockdep_assert_held(&nfsd_mutex);

	dprintk("nfsd: creating service\n");

	strscpy(nn->nfsd_name, scope ? scope : utsname()->nodename,
		sizeof(nn->nfsd_name));

	error = nfsd_create_serv(net);
	if (error)
		goto out;
	serv = nn->nfsd_serv;

	error = nfsd_startup_net(net, cred);
	if (error)
		goto out_put;
	error = nfsd_set_nrthreads(n, nthreads, net);
	if (error)
		goto out_put;
	error = serv->sv_nrthreads;
out_put:
	if (serv->sv_nrthreads == 0)
		nfsd_destroy_serv(net);
out:
	return error;
}
#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
static bool
nfsd_support_acl_version(int vers)
{
	if (vers >= NFSD_ACL_MINVERS && vers < NFSD_ACL_NRVERS)
		return nfsd_acl_version[vers] != NULL;
	return false;
}
static int
nfsd_acl_rpcbind_set(struct net *net, const struct svc_program *progp,
		     u32 version, int family, unsigned short proto,
		     unsigned short port)
{
	if (!nfsd_support_acl_version(version) ||
	    !nfsd_vers(net_generic(net, nfsd_net_id), version, NFSD_TEST))
		return 0;
	return svc_generic_rpcbind_set(net, progp, version, family,
			proto, port);
}
static __be32
nfsd_acl_init_request(struct svc_rqst *rqstp,
		      const struct svc_program *progp,
		      struct svc_process_info *ret)
{
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
	int i;

	if (likely(nfsd_support_acl_version(rqstp->rq_vers) &&
		   nfsd_vers(nn, rqstp->rq_vers, NFSD_TEST)))
		return svc_generic_init_request(rqstp, progp, ret);

	ret->mismatch.lovers = NFSD_ACL_NRVERS;
	for (i = NFSD_ACL_MINVERS; i < NFSD_ACL_NRVERS; i++) {
		if (nfsd_support_acl_version(rqstp->rq_vers) &&
		    nfsd_vers(nn, i, NFSD_TEST)) {
			ret->mismatch.lovers = i;
			break;
		}
	}
	if (ret->mismatch.lovers == NFSD_ACL_NRVERS)
		return rpc_prog_unavail;
	ret->mismatch.hivers = NFSD_ACL_MINVERS;
	for (i = NFSD_ACL_NRVERS - 1; i >= NFSD_ACL_MINVERS; i--) {
		if (nfsd_support_acl_version(rqstp->rq_vers) &&
		    nfsd_vers(nn, i, NFSD_TEST)) {
			ret->mismatch.hivers = i;
			break;
		}
	}
	return rpc_prog_mismatch;
}
static int
nfsd_rpcbind_set(struct net *net, const struct svc_program *progp,
		 u32 version, int family, unsigned short proto,
		 unsigned short port)
{
	if (!nfsd_vers(net_generic(net, nfsd_net_id), version, NFSD_TEST))
		return 0;
	return svc_generic_rpcbind_set(net, progp, version, family,
			proto, port);
}
static __be32
nfsd_init_request(struct svc_rqst *rqstp,
		  const struct svc_program *progp,
		  struct svc_process_info *ret)
{
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
	int i;

	if (likely(nfsd_vers(nn, rqstp->rq_vers, NFSD_TEST)))
		return svc_generic_init_request(rqstp, progp, ret);

	ret->mismatch.lovers = NFSD_MAXVERS + 1;
	for (i = NFSD_MINVERS; i <= NFSD_MAXVERS; i++) {
		if (nfsd_vers(nn, i, NFSD_TEST)) {
			ret->mismatch.lovers = i;
			break;
		}
	}
	if (ret->mismatch.lovers > NFSD_MAXVERS)
		return rpc_prog_unavail;
	ret->mismatch.hivers = NFSD_MINVERS;
	for (i = NFSD_MAXVERS; i >= NFSD_MINVERS; i--) {
		if (nfsd_vers(nn, i, NFSD_TEST)) {
			ret->mismatch.hivers = i;
			break;
		}
	}
	return rpc_prog_mismatch;
}
/*
 * This is the NFS server kernel thread
 */
static int
nfsd(void *vrqstp)
{
	struct svc_rqst *rqstp = (struct svc_rqst *) vrqstp;
	struct svc_xprt *perm_sock = list_entry(rqstp->rq_server->sv_permsocks.next, typeof(struct svc_xprt), xpt_list);
	struct net *net = perm_sock->xpt_net;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	/* At this point, the thread shares current->fs
	 * with the init process. We need to create files with the
	 * umask as defined by the client instead of init's umask.
	 */
	svc_thread_init_status(rqstp, unshare_fs_struct());

	current->fs->umask = 0;

	atomic_inc(&nfsd_th_cnt);

	set_freezable();

	/*
	 * The main request loop
	 */
	while (!svc_thread_should_stop(rqstp)) {
		/* Update sv_maxconn if it has changed */
		rqstp->rq_server->sv_maxconn = nn->max_connections;

		svc_recv(rqstp);

		nfsd_file_net_dispose(nn);
	}

	atomic_dec(&nfsd_th_cnt);

	/* Release the thread */
	svc_exit_thread(rqstp);
	return 0;
}
/**
 * nfsd_dispatch - Process an NFS or NFSACL or LOCALIO Request
 * @rqstp: incoming request
 *
 * This RPC dispatcher integrates the NFS server's duplicate reply cache.
 *
 * Return values:
 *  %0: Processing complete; do not send a Reply
 *  %1: Processing complete; send Reply in rqstp->rq_res
 */
int nfsd_dispatch(struct svc_rqst *rqstp)
{
	const struct svc_procedure *proc = rqstp->rq_procinfo;
	__be32 *statp = rqstp->rq_accept_statp;
	struct nfsd_cacherep *rp;
	unsigned int start, len;
	__be32 *nfs_reply;

	/*
	 * Give the xdr decoder a chance to change this if it wants
	 * (necessary in the NFSv4.0 compound case)
	 */
	rqstp->rq_cachetype = proc->pc_cachetype;

	/*
	 * ->pc_decode advances the argument stream past the NFS
	 * Call header, so grab the header's starting location and
	 * size now for the call to nfsd_cache_lookup().
	 */
	start = xdr_stream_pos(&rqstp->rq_arg_stream);
	len = xdr_stream_remaining(&rqstp->rq_arg_stream);
	if (!proc->pc_decode(rqstp, &rqstp->rq_arg_stream))
		goto out_decode_err;

	/*
	 * Release rq_status_counter setting it to an odd value after the rpc
	 * request has been properly parsed. rq_status_counter is used to
	 * notify the consumers if the rqstp fields are stable
	 * (rq_status_counter is odd) or not meaningful (rq_status_counter
	 * is even).
	 */
	smp_store_release(&rqstp->rq_status_counter, rqstp->rq_status_counter | 1);

	rp = NULL;
	switch (nfsd_cache_lookup(rqstp, start, len, &rp)) {
	case RC_DOIT:
		break;
	case RC_REPLY:
		goto out_cached_reply;
	case RC_DROPIT:
		goto out_dropit;
	}

	nfs_reply = xdr_inline_decode(&rqstp->rq_res_stream, 0);
	*statp = proc->pc_func(rqstp);
	if (test_bit(RQ_DROPME, &rqstp->rq_flags))
		goto out_update_drop;

	if (!proc->pc_encode(rqstp, &rqstp->rq_res_stream))
		goto out_encode_err;

	/*
	 * Release rq_status_counter setting it to an even value after the rpc
	 * request has been properly processed.
	 */
	smp_store_release(&rqstp->rq_status_counter, rqstp->rq_status_counter + 1);

	nfsd_cache_update(rqstp, rp, rqstp->rq_cachetype, nfs_reply);
out_cached_reply:
	return 1;

out_decode_err:
	trace_nfsd_garbage_args_err(rqstp);
	*statp = rpc_garbage_args;
	return 1;

out_update_drop:
	nfsd_cache_update(rqstp, rp, RC_NOCACHE, NULL);
out_dropit:
	return 0;

out_encode_err:
	trace_nfsd_cant_encode_err(rqstp);
	nfsd_cache_update(rqstp, rp, RC_NOCACHE, NULL);
	*statp = rpc_system_err;
	return 1;
}
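
/*
 * Note on the cache integration above: nfsd_cache_lookup() tells the
 * dispatcher either to process the call, to resend a previously cached
 * reply, or to drop the request, and nfsd_cache_update() records the new
 * reply so that a retransmitted non-idempotent request is not executed
 * twice.
 */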
/**
 * nfssvc_decode_voidarg - Decode void arguments
 * @rqstp: Server RPC transaction context
 * @xdr: XDR stream positioned at arguments to decode
 *
 * Return values:
 *   %false: Arguments were not valid
 *   %true: Decoding was successful
 */
bool nfssvc_decode_voidarg(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
	return true;
}
/**
 * nfssvc_encode_voidres - Encode void results
 * @rqstp: Server RPC transaction context
 * @xdr: XDR stream into which to encode results
 *
 * Return values:
 *   %false: Local error while encoding
 *   %true: Encoding was successful
 */
bool nfssvc_encode_voidres(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
	return true;
}