/*
 * Copyright (c) 2001 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Kendrick Smith <kmsmith@umich.edu>
 * Andy Adamson <kandros@umich.edu>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/ratelimit.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/addr.h>
#include <linux/jhash.h>
#include <linux/string_helpers.h>
#include "current_stateid.h"
#include "filecache.h"

#define NFSDDBG_FACILITY                NFSDDBG_PROC
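
/*
 * Special stateid values follow: the all-zeros and all-ones stateids are
 * the reserved "anonymous" and "READ bypass" stateids from the NFSv4
 * spec; currentstateid and close_stateid are nfsd-internal sentinels.
 */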
#define all_ones {{~0,~0},~0}
static const stateid_t one_stateid = {
	.si_generation = ~0,
	.si_opaque = all_ones,
};
static const stateid_t zero_stateid = {
	/* all fields zero */
};
static const stateid_t currentstateid = {
	.si_generation = 1,
};
static const stateid_t close_stateid = {
	.si_generation = 0xffffffffU,
};

static u64 current_sessionid = 1;

#define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
#define ONE_STATEID(stateid)  (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
#define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
#define CLOSE_STATEID(stateid)  (!memcmp((stateid), &close_stateid, sizeof(stateid_t)))
/* forward declarations */
static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
static void nfs4_free_ol_stateid(struct nfs4_stid *stid);
void nfsd4_end_grace(struct nfsd_net *nn);
static void _free_cpntf_state_locked(struct nfsd_net *nn,
				     struct nfs4_cpntf_state *cps);

/*
 * Currently used for the del_recall_lru and file hash table.  In an
 * effort to decrease the scope of the client_mutex, this spinlock may
 * eventually cover more:
 */
static DEFINE_SPINLOCK(state_lock);

enum nfsd4_st_mutex_lock_subclass {
	OPEN_STATEID_MUTEX = 0,
	LOCK_STATEID_MUTEX = 1,
};

/*
 * A waitqueue for all in-progress 4.0 CLOSE operations that are waiting for
 * the refcount on the open stateid to drop.
 */
static DECLARE_WAIT_QUEUE_HEAD(close_wq);

/*
 * A waitqueue where a writer to clients/#/ctl destroying a client can
 * wait for cl_rpc_users to drop to 0 and then for the client to be
 * unhashed.
 */
static DECLARE_WAIT_QUEUE_HEAD(expiry_wq);
static struct kmem_cache *client_slab;
static struct kmem_cache *openowner_slab;
static struct kmem_cache *lockowner_slab;
static struct kmem_cache *file_slab;
static struct kmem_cache *stateid_slab;
static struct kmem_cache *deleg_slab;
static struct kmem_cache *odstate_slab;

static void free_session(struct nfsd4_session *);

static const struct nfsd4_callback_ops nfsd4_cb_recall_ops;
static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops;

static bool is_session_dead(struct nfsd4_session *ses)
{
	return ses->se_flags & NFS4_SESSION_DEAD;
}
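
/*
 * Mark a session dead so that no new references can be taken; fails with
 * nfserr_jukebox while references other than the caller's own remain.
 */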
static __be32 mark_session_dead_locked(struct nfsd4_session *ses, int ref_held_by_me)
{
	if (atomic_read(&ses->se_ref) > ref_held_by_me)
		return nfserr_jukebox;
	ses->se_flags |= NFS4_SESSION_DEAD;
	return nfs_ok;
}

static bool is_client_expired(struct nfs4_client *clp)
{
	return clp->cl_time == 0;
}

static __be32 get_client_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	if (is_client_expired(clp))
		return nfserr_expired;
	atomic_inc(&clp->cl_rpc_users);
	return nfs_ok;
}

/* must be called under the client_lock */
static void
renew_client_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	if (is_client_expired(clp)) {
		WARN_ON(1);
		printk("%s: client (clientid %08x/%08x) already expired\n",
			__func__,
			clp->cl_clientid.cl_boot,
			clp->cl_clientid.cl_id);
		return;
	}

	list_move_tail(&clp->cl_lru, &nn->client_lru);
	clp->cl_time = ktime_get_boottime_seconds();
}

static void put_client_renew_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	if (!atomic_dec_and_test(&clp->cl_rpc_users))
		return;
	if (!is_client_expired(clp))
		renew_client_locked(clp);
	else
		wake_up_all(&expiry_wq);
}

static void put_client_renew(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	if (!atomic_dec_and_lock(&clp->cl_rpc_users, &nn->client_lock))
		return;
	if (!is_client_expired(clp))
		renew_client_locked(clp);
	else
		wake_up_all(&expiry_wq);
	spin_unlock(&nn->client_lock);
}
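
/*
 * Take a reference on a live session (and on its client). Sessions that
 * are already on their way down are refused with nfserr_badsession.
 */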
static __be32 nfsd4_get_session_locked(struct nfsd4_session *ses)
{
	__be32 status;

	if (is_session_dead(ses))
		return nfserr_badsession;
	status = get_client_locked(ses->se_client);
	if (status)
		return status;
	atomic_inc(&ses->se_ref);
	return nfs_ok;
}

static void nfsd4_put_session_locked(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	if (atomic_dec_and_test(&ses->se_ref) && is_session_dead(ses))
		free_session(ses);
	put_client_renew_locked(clp);
}

static void nfsd4_put_session(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	spin_lock(&nn->client_lock);
	nfsd4_put_session_locked(ses);
	spin_unlock(&nn->client_lock);
}
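
/*
 * Blocked-lock support for CB_NOTIFY_LOCK: each nfsd4_blocked_lock tracks
 * a lock request that could not be granted immediately, so the client can
 * be called back when the lock becomes available.
 */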
static struct nfsd4_blocked_lock *
find_blocked_lock(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
			struct nfsd_net *nn)
{
	struct nfsd4_blocked_lock *cur, *found = NULL;

	spin_lock(&nn->blocked_locks_lock);
	list_for_each_entry(cur, &lo->lo_blocked, nbl_list) {
		if (fh_match(fh, &cur->nbl_fh)) {
			list_del_init(&cur->nbl_list);
			list_del_init(&cur->nbl_lru);
			found = cur;
			break;
		}
	}
	spin_unlock(&nn->blocked_locks_lock);
	if (found)
		locks_delete_block(&found->nbl_lock);
	return found;
}

static struct nfsd4_blocked_lock *
find_or_allocate_block(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
			struct nfsd_net *nn)
{
	struct nfsd4_blocked_lock *nbl;

	nbl = find_blocked_lock(lo, fh, nn);
	if (!nbl) {
		nbl = kmalloc(sizeof(*nbl), GFP_KERNEL);
		if (nbl) {
			INIT_LIST_HEAD(&nbl->nbl_list);
			INIT_LIST_HEAD(&nbl->nbl_lru);
			fh_copy_shallow(&nbl->nbl_fh, fh);
			locks_init_lock(&nbl->nbl_lock);
			nfsd4_init_cb(&nbl->nbl_cb, lo->lo_owner.so_client,
					&nfsd4_cb_notify_lock_ops,
					NFSPROC4_CLNT_CB_NOTIFY_LOCK);
		}
	}
	return nbl;
}

static void
free_blocked_lock(struct nfsd4_blocked_lock *nbl)
{
	locks_delete_block(&nbl->nbl_lock);
	locks_release_private(&nbl->nbl_lock);
	kfree(nbl);
}

static void
remove_blocked_locks(struct nfs4_lockowner *lo)
{
	struct nfs4_client *clp = lo->lo_owner.so_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
	struct nfsd4_blocked_lock *nbl;
	LIST_HEAD(reaplist);

	/* Dequeue all blocked locks */
	spin_lock(&nn->blocked_locks_lock);
	while (!list_empty(&lo->lo_blocked)) {
		nbl = list_first_entry(&lo->lo_blocked,
					struct nfsd4_blocked_lock,
					nbl_list);
		list_del_init(&nbl->nbl_list);
		list_move(&nbl->nbl_lru, &reaplist);
	}
	spin_unlock(&nn->blocked_locks_lock);

	/* Now free them */
	while (!list_empty(&reaplist)) {
		nbl = list_first_entry(&reaplist, struct nfsd4_blocked_lock,
					nbl_lru);
		list_del_init(&nbl->nbl_lru);
		free_blocked_lock(nbl);
	}
}
static void
nfsd4_cb_notify_lock_prepare(struct nfsd4_callback *cb)
{
	struct nfsd4_blocked_lock *nbl = container_of(cb,
						struct nfsd4_blocked_lock, nbl_cb);
	locks_delete_block(&nbl->nbl_lock);
}

static int
nfsd4_cb_notify_lock_done(struct nfsd4_callback *cb, struct rpc_task *task)
{
	/*
	 * Since this is just an optimization, we don't try very hard if it
	 * turns out not to succeed. We'll requeue it on NFS4ERR_DELAY, and
	 * just quit trying on anything else.
	 */
	switch (task->tk_status) {
	case -NFS4ERR_DELAY:
		rpc_delay(task, 1 * HZ);
		return 0;
	default:
		return 1;
	}
}

static void
nfsd4_cb_notify_lock_release(struct nfsd4_callback *cb)
{
	struct nfsd4_blocked_lock *nbl = container_of(cb,
						struct nfsd4_blocked_lock, nbl_cb);
	free_blocked_lock(nbl);
}

static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops = {
	.prepare	= nfsd4_cb_notify_lock_prepare,
	.done		= nfsd4_cb_notify_lock_done,
	.release	= nfsd4_cb_notify_lock_release,
};

static inline struct nfs4_stateowner *
nfs4_get_stateowner(struct nfs4_stateowner *sop)
{
	atomic_inc(&sop->so_count);
	return sop;
}
static bool
same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner)
{
	return (sop->so_owner.len == owner->len) &&
		0 == memcmp(sop->so_owner.data, owner->data, owner->len);
}

static struct nfs4_openowner *
find_openstateowner_str_locked(unsigned int hashval, struct nfsd4_open *open,
			struct nfs4_client *clp)
{
	struct nfs4_stateowner *so;

	lockdep_assert_held(&clp->cl_lock);

	list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[hashval],
			    so_strhash) {
		if (!so->so_is_open_owner)
			continue;
		if (same_owner_str(so, &open->op_owner))
			return openowner(nfs4_get_stateowner(so));
	}
	return NULL;
}

static struct nfs4_openowner *
find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open,
			struct nfs4_client *clp)
{
	struct nfs4_openowner *oo;

	spin_lock(&clp->cl_lock);
	oo = find_openstateowner_str_locked(hashval, open, clp);
	spin_unlock(&clp->cl_lock);
	return oo;
}

static inline u32
opaque_hashval(const void *ptr, int nbytes)
{
	unsigned char *cptr = (unsigned char *) ptr;
	unsigned int x = 0;

	while (nbytes--) {
		x *= 37;
		x += *cptr++;
	}
	return x;
}
static void nfsd4_free_file_rcu(struct rcu_head *rcu)
{
	struct nfs4_file *fp = container_of(rcu, struct nfs4_file, fi_rcu);

	kmem_cache_free(file_slab, fp);
}

void
put_nfs4_file(struct nfs4_file *fi)
{
	might_lock(&state_lock);

	if (refcount_dec_and_lock(&fi->fi_ref, &state_lock)) {
		hlist_del_rcu(&fi->fi_hash);
		spin_unlock(&state_lock);
		WARN_ON_ONCE(!list_empty(&fi->fi_clnt_odstate));
		WARN_ON_ONCE(!list_empty(&fi->fi_delegations));
		call_rcu(&fi->fi_rcu, nfsd4_free_file_rcu);
	}
}
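
/* fi_fds is indexed by open mode: O_RDONLY, O_WRONLY, or O_RDWR */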
static struct nfsd_file *
__nfs4_get_fd(struct nfs4_file *f, int oflag)
{
	if (f->fi_fds[oflag])
		return nfsd_file_get(f->fi_fds[oflag]);
	return NULL;
}

static struct nfsd_file *
find_writeable_file_locked(struct nfs4_file *f)
{
	struct nfsd_file *ret;

	lockdep_assert_held(&f->fi_lock);

	ret = __nfs4_get_fd(f, O_WRONLY);
	if (!ret)
		ret = __nfs4_get_fd(f, O_RDWR);
	return ret;
}

static struct nfsd_file *
find_writeable_file(struct nfs4_file *f)
{
	struct nfsd_file *ret;

	spin_lock(&f->fi_lock);
	ret = find_writeable_file_locked(f);
	spin_unlock(&f->fi_lock);

	return ret;
}

static struct nfsd_file *
find_readable_file_locked(struct nfs4_file *f)
{
	struct nfsd_file *ret;

	lockdep_assert_held(&f->fi_lock);

	ret = __nfs4_get_fd(f, O_RDONLY);
	if (!ret)
		ret = __nfs4_get_fd(f, O_RDWR);
	return ret;
}

static struct nfsd_file *
find_readable_file(struct nfs4_file *f)
{
	struct nfsd_file *ret;

	spin_lock(&f->fi_lock);
	ret = find_readable_file_locked(f);
	spin_unlock(&f->fi_lock);

	return ret;
}

static struct nfsd_file *
find_any_file(struct nfs4_file *f)
{
	struct nfsd_file *ret;

	if (!f)
		return NULL;
	spin_lock(&f->fi_lock);
	ret = __nfs4_get_fd(f, O_RDWR);
	if (!ret) {
		ret = __nfs4_get_fd(f, O_WRONLY);
		if (!ret)
			ret = __nfs4_get_fd(f, O_RDONLY);
	}
	spin_unlock(&f->fi_lock);
	return ret;
}

static struct nfsd_file *find_deleg_file(struct nfs4_file *f)
{
	struct nfsd_file *ret = NULL;

	spin_lock(&f->fi_lock);
	if (f->fi_deleg_file)
		ret = nfsd_file_get(f->fi_deleg_file);
	spin_unlock(&f->fi_lock);
	return ret;
}
static atomic_long_t num_delegations;
unsigned long max_delegations;

/*
 * Open owner state (share locks)
 */

/* hash tables for lock and open owners */
#define OWNER_HASH_BITS              8
#define OWNER_HASH_SIZE             (1 << OWNER_HASH_BITS)
#define OWNER_HASH_MASK             (OWNER_HASH_SIZE - 1)

static unsigned int ownerstr_hashval(struct xdr_netobj *ownername)
{
	unsigned int ret;

	ret = opaque_hashval(ownername->data, ownername->len);
	return ret & OWNER_HASH_MASK;
}

/* hash table for nfs4_file */
#define FILE_HASH_BITS                   8
#define FILE_HASH_SIZE                  (1 << FILE_HASH_BITS)

static unsigned int nfsd_fh_hashval(struct knfsd_fh *fh)
{
	return jhash2(fh->fh_base.fh_pad, XDR_QUADLEN(fh->fh_size), 0);
}

static unsigned int file_hashval(struct knfsd_fh *fh)
{
	return nfsd_fh_hashval(fh) & (FILE_HASH_SIZE - 1);
}

static struct hlist_head file_hashtbl[FILE_HASH_SIZE];
static void
__nfs4_file_get_access(struct nfs4_file *fp, u32 access)
{
	lockdep_assert_held(&fp->fi_lock);

	if (access & NFS4_SHARE_ACCESS_WRITE)
		atomic_inc(&fp->fi_access[O_WRONLY]);
	if (access & NFS4_SHARE_ACCESS_READ)
		atomic_inc(&fp->fi_access[O_RDONLY]);
}

static __be32
nfs4_file_get_access(struct nfs4_file *fp, u32 access)
{
	lockdep_assert_held(&fp->fi_lock);

	/* Does this access mode make sense? */
	if (access & ~NFS4_SHARE_ACCESS_BOTH)
		return nfserr_inval;

	/* Does it conflict with a deny mode already set? */
	if ((access & fp->fi_share_deny) != 0)
		return nfserr_share_denied;

	__nfs4_file_get_access(fp, access);
	return nfs_ok;
}

static __be32 nfs4_file_check_deny(struct nfs4_file *fp, u32 deny)
{
	/* Common case is that there is no deny mode. */
	if (deny) {
		/* Does this deny mode make sense? */
		if (deny & ~NFS4_SHARE_DENY_BOTH)
			return nfserr_inval;

		if ((deny & NFS4_SHARE_DENY_READ) &&
		    atomic_read(&fp->fi_access[O_RDONLY]))
			return nfserr_share_denied;

		if ((deny & NFS4_SHARE_DENY_WRITE) &&
		    atomic_read(&fp->fi_access[O_WRONLY]))
			return nfserr_share_denied;
	}
	return nfs_ok;
}

static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
{
	might_lock(&fp->fi_lock);

	if (atomic_dec_and_lock(&fp->fi_access[oflag], &fp->fi_lock)) {
		struct nfsd_file *f1 = NULL;
		struct nfsd_file *f2 = NULL;

		swap(f1, fp->fi_fds[oflag]);
		if (atomic_read(&fp->fi_access[1 - oflag]) == 0)
			swap(f2, fp->fi_fds[O_RDWR]);
		spin_unlock(&fp->fi_lock);
		if (f1)
			nfsd_file_put(f1);
		if (f2)
			nfsd_file_put(f2);
	}
}

static void nfs4_file_put_access(struct nfs4_file *fp, u32 access)
{
	WARN_ON_ONCE(access & ~NFS4_SHARE_ACCESS_BOTH);

	if (access & NFS4_SHARE_ACCESS_WRITE)
		__nfs4_file_put_access(fp, O_WRONLY);
	if (access & NFS4_SHARE_ACCESS_READ)
		__nfs4_file_put_access(fp, O_RDONLY);
}
/*
 * Allocate a new open/delegation state counter. This is needed for
 * pNFS for proper return on close semantics.
 *
 * Note that we only allocate it for pNFS-enabled exports, otherwise
 * all pointers to struct nfs4_clnt_odstate are always NULL.
 */
static struct nfs4_clnt_odstate *
alloc_clnt_odstate(struct nfs4_client *clp)
{
	struct nfs4_clnt_odstate *co;

	co = kmem_cache_zalloc(odstate_slab, GFP_KERNEL);
	if (co) {
		co->co_client = clp;
		refcount_set(&co->co_odcount, 1);
	}
	return co;
}

static void
hash_clnt_odstate_locked(struct nfs4_clnt_odstate *co)
{
	struct nfs4_file *fp = co->co_file;

	lockdep_assert_held(&fp->fi_lock);
	list_add(&co->co_perfile, &fp->fi_clnt_odstate);
}

static void
get_clnt_odstate(struct nfs4_clnt_odstate *co)
{
	if (co)
		refcount_inc(&co->co_odcount);
}

static void
put_clnt_odstate(struct nfs4_clnt_odstate *co)
{
	struct nfs4_file *fp;

	if (!co)
		return;

	fp = co->co_file;
	if (refcount_dec_and_lock(&co->co_odcount, &fp->fi_lock)) {
		list_del(&co->co_perfile);
		spin_unlock(&fp->fi_lock);

		nfsd4_return_all_file_layouts(co->co_client, fp);
		kmem_cache_free(odstate_slab, co);
	}
}

static struct nfs4_clnt_odstate *
find_or_hash_clnt_odstate(struct nfs4_file *fp, struct nfs4_clnt_odstate *new)
{
	struct nfs4_clnt_odstate *co;
	struct nfs4_client *cl;

	if (!new)
		return NULL;

	cl = new->co_client;

	spin_lock(&fp->fi_lock);
	list_for_each_entry(co, &fp->fi_clnt_odstate, co_perfile) {
		if (co->co_client == cl) {
			get_clnt_odstate(co);
			goto out;
		}
	}
	co = new;
	co->co_file = fp;
	hash_clnt_odstate_locked(new);
out:
	spin_unlock(&fp->fi_lock);
	return co;
}
struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
				  void (*sc_free)(struct nfs4_stid *))
{
	struct nfs4_stid *stid;
	int new_id;

	stid = kmem_cache_zalloc(slab, GFP_KERNEL);
	if (!stid)
		return NULL;

	idr_preload(GFP_KERNEL);
	spin_lock(&cl->cl_lock);
	/* Reserving 0 for start of file in nfsdfs "states" file: */
	new_id = idr_alloc_cyclic(&cl->cl_stateids, stid, 1, 0, GFP_NOWAIT);
	spin_unlock(&cl->cl_lock);
	idr_preload_end();
	if (new_id < 0)
		goto out_free;

	stid->sc_free = sc_free;
	stid->sc_client = cl;
	stid->sc_stateid.si_opaque.so_id = new_id;
	stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
	/* Will be incremented before return to client: */
	refcount_set(&stid->sc_count, 1);
	spin_lock_init(&stid->sc_lock);
	INIT_LIST_HEAD(&stid->sc_cp_list);

	/*
	 * It shouldn't be a problem to reuse an opaque stateid value.
	 * I don't think it is for 4.1.  But with 4.0 I worry that, for
	 * example, a stray write retransmission could be accepted by
	 * the server when it should have been rejected.  Therefore,
	 * adopt a trick from the sctp code to attempt to maximize the
	 * amount of time until an id is reused, by ensuring they always
	 * "increase" (mod INT_MAX):
	 */
	return stid;
out_free:
	kmem_cache_free(slab, stid);
	return NULL;
}
/*
 * Create a unique stateid_t to represent each COPY.
 */
static int nfs4_init_cp_state(struct nfsd_net *nn, copy_stateid_t *stid,
			      unsigned char sc_type)
{
	int new_id;

	stid->stid.si_opaque.so_clid.cl_boot = (u32)nn->boot_time;
	stid->stid.si_opaque.so_clid.cl_id = nn->s2s_cp_cl_id;
	stid->sc_type = sc_type;

	idr_preload(GFP_KERNEL);
	spin_lock(&nn->s2s_cp_lock);
	new_id = idr_alloc_cyclic(&nn->s2s_cp_stateids, stid, 0, 0, GFP_NOWAIT);
	stid->stid.si_opaque.so_id = new_id;
	stid->stid.si_generation = 1;
	spin_unlock(&nn->s2s_cp_lock);
	idr_preload_end();
	if (new_id < 0)
		return 0;
	return 1;
}

int nfs4_init_copy_state(struct nfsd_net *nn, struct nfsd4_copy *copy)
{
	return nfs4_init_cp_state(nn, &copy->cp_stateid, NFS4_COPY_STID);
}
struct nfs4_cpntf_state *nfs4_alloc_init_cpntf_state(struct nfsd_net *nn,
						     struct nfs4_stid *p_stid)
{
	struct nfs4_cpntf_state *cps;

	cps = kzalloc(sizeof(struct nfs4_cpntf_state), GFP_KERNEL);
	if (!cps)
		return NULL;
	cps->cpntf_time = ktime_get_boottime_seconds();
	refcount_set(&cps->cp_stateid.sc_count, 1);
	if (!nfs4_init_cp_state(nn, &cps->cp_stateid, NFS4_COPYNOTIFY_STID))
		goto out_free;
	spin_lock(&nn->s2s_cp_lock);
	list_add(&cps->cp_list, &p_stid->sc_cp_list);
	spin_unlock(&nn->s2s_cp_lock);
	return cps;
out_free:
	kfree(cps);
	return NULL;
}

void nfs4_free_copy_state(struct nfsd4_copy *copy)
{
	struct nfsd_net *nn;

	WARN_ON_ONCE(copy->cp_stateid.sc_type != NFS4_COPY_STID);
	nn = net_generic(copy->cp_clp->net, nfsd_net_id);
	spin_lock(&nn->s2s_cp_lock);
	idr_remove(&nn->s2s_cp_stateids,
		   copy->cp_stateid.stid.si_opaque.so_id);
	spin_unlock(&nn->s2s_cp_lock);
}

static void nfs4_free_cpntf_statelist(struct net *net, struct nfs4_stid *stid)
{
	struct nfs4_cpntf_state *cps;
	struct nfsd_net *nn;

	nn = net_generic(net, nfsd_net_id);
	spin_lock(&nn->s2s_cp_lock);
	while (!list_empty(&stid->sc_cp_list)) {
		cps = list_first_entry(&stid->sc_cp_list,
				       struct nfs4_cpntf_state, cp_list);
		_free_cpntf_state_locked(nn, cps);
	}
	spin_unlock(&nn->s2s_cp_lock);
}
static struct nfs4_ol_stateid *nfs4_alloc_open_stateid(struct nfs4_client *clp)
{
	struct nfs4_stid *stid;

	stid = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_ol_stateid);
	if (!stid)
		return NULL;

	return openlockstateid(stid);
}

static void nfs4_free_deleg(struct nfs4_stid *stid)
{
	kmem_cache_free(deleg_slab, stid);
	atomic_long_dec(&num_delegations);
}
/*
 * When we recall a delegation, we should be careful not to hand it
 * out again straight away.
 * To ensure this we keep a pair of bloom filters ('new' and 'old')
 * in which the filehandles of recalled delegations are "stored".
 * If a filehandle appears in either filter, a delegation is blocked.
 * When a delegation is recalled, the filehandle is stored in the "new"
 * filter.
 * Every 30 seconds we swap the filters and clear the "new" one,
 * unless both are empty of course.
 *
 * Each filter is 256 bits.  We hash the filehandle to 32bit and use the
 * low 3 bytes as hash-table indices.
 *
 * 'blocked_delegations_lock', which is always taken in block_delegations(),
 * is used to manage concurrent access.  Testing does not need the lock
 * except when swapping the two filters.
 */
static DEFINE_SPINLOCK(blocked_delegations_lock);
static struct bloom_pair {
	int	entries, old_entries;
	time64_t swap_time;
	int	new; /* index into 'set' */
	DECLARE_BITMAP(set[2], 256);
} blocked_delegations;

static int delegation_blocked(struct knfsd_fh *fh)
{
	u32 hash;
	struct bloom_pair *bd = &blocked_delegations;

	if (bd->entries == 0)
		return 0;
	if (ktime_get_seconds() - bd->swap_time > 30) {
		spin_lock(&blocked_delegations_lock);
		if (ktime_get_seconds() - bd->swap_time > 30) {
			bd->entries -= bd->old_entries;
			bd->old_entries = bd->entries;
			memset(bd->set[bd->new], 0,
			       sizeof(bd->set[0]));
			bd->new = 1-bd->new;
			bd->swap_time = ktime_get_seconds();
		}
		spin_unlock(&blocked_delegations_lock);
	}
	hash = jhash(&fh->fh_base, fh->fh_size, 0);
	if (test_bit(hash&255, bd->set[0]) &&
	    test_bit((hash>>8)&255, bd->set[0]) &&
	    test_bit((hash>>16)&255, bd->set[0]))
		return 1;

	if (test_bit(hash&255, bd->set[1]) &&
	    test_bit((hash>>8)&255, bd->set[1]) &&
	    test_bit((hash>>16)&255, bd->set[1]))
		return 1;

	return 0;
}

static void block_delegations(struct knfsd_fh *fh)
{
	u32 hash;
	struct bloom_pair *bd = &blocked_delegations;

	hash = jhash(&fh->fh_base, fh->fh_size, 0);

	spin_lock(&blocked_delegations_lock);
	__set_bit(hash&255, bd->set[bd->new]);
	__set_bit((hash>>8)&255, bd->set[bd->new]);
	__set_bit((hash>>16)&255, bd->set[bd->new]);
	if (bd->entries == 0)
		bd->swap_time = ktime_get_seconds();
	bd->entries += 1;
	spin_unlock(&blocked_delegations_lock);
}
static struct nfs4_delegation *
alloc_init_deleg(struct nfs4_client *clp, struct nfs4_file *fp,
		 struct svc_fh *current_fh,
		 struct nfs4_clnt_odstate *odstate)
{
	struct nfs4_delegation *dp;
	long n;

	dprintk("NFSD alloc_init_deleg\n");
	n = atomic_long_inc_return(&num_delegations);
	if (n < 0 || n > max_delegations)
		goto out_dec;
	if (delegation_blocked(&current_fh->fh_handle))
		goto out_dec;
	dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab, nfs4_free_deleg));
	if (dp == NULL)
		goto out_dec;

	/*
	 * delegation seqid's are never incremented.  The 4.1 special
	 * meaning of seqid 0 isn't meaningful, really, but let's avoid
	 * 0 anyway just for consistency and use 1:
	 */
	dp->dl_stid.sc_stateid.si_generation = 1;
	INIT_LIST_HEAD(&dp->dl_perfile);
	INIT_LIST_HEAD(&dp->dl_perclnt);
	INIT_LIST_HEAD(&dp->dl_recall_lru);
	dp->dl_clnt_odstate = odstate;
	get_clnt_odstate(odstate);
	dp->dl_type = NFS4_OPEN_DELEGATE_READ;
	dp->dl_retries = 1;
	nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client,
		      &nfsd4_cb_recall_ops, NFSPROC4_CLNT_CB_RECALL);
	get_nfs4_file(fp);
	dp->dl_stid.sc_file = fp;
	return dp;
out_dec:
	atomic_long_dec(&num_delegations);
	return NULL;
}
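
/*
 * Drop a reference on a generic stateid; on the last put, remove it from
 * the client's IDR and free it via its sc_free callback.
 */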
void
nfs4_put_stid(struct nfs4_stid *s)
{
	struct nfs4_file *fp = s->sc_file;
	struct nfs4_client *clp = s->sc_client;

	might_lock(&clp->cl_lock);

	if (!refcount_dec_and_lock(&s->sc_count, &clp->cl_lock)) {
		wake_up_all(&close_wq);
		return;
	}
	idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
	nfs4_free_cpntf_statelist(clp->net, s);
	spin_unlock(&clp->cl_lock);
	s->sc_free(s);
	if (fp)
		put_nfs4_file(fp);
}

void
nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid)
{
	stateid_t *src = &stid->sc_stateid;

	spin_lock(&stid->sc_lock);
	if (unlikely(++src->si_generation == 0))
		src->si_generation = 1;
	memcpy(dst, src, sizeof(*dst));
	spin_unlock(&stid->sc_lock);
}
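
/* Drop one delegation reference on the file; release the lease file when
 * the last delegee goes away. */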
static void put_deleg_file(struct nfs4_file *fp)
{
	struct nfsd_file *nf = NULL;

	spin_lock(&fp->fi_lock);
	if (--fp->fi_delegees == 0)
		swap(nf, fp->fi_deleg_file);
	spin_unlock(&fp->fi_lock);

	if (nf)
		nfsd_file_put(nf);
}

static void nfs4_unlock_deleg_lease(struct nfs4_delegation *dp)
{
	struct nfs4_file *fp = dp->dl_stid.sc_file;
	struct nfsd_file *nf = fp->fi_deleg_file;

	WARN_ON_ONCE(!fp->fi_delegees);

	vfs_setlease(nf->nf_file, F_UNLCK, NULL, (void **)&dp);
	put_deleg_file(fp);
}

static void destroy_unhashed_deleg(struct nfs4_delegation *dp)
{
	put_clnt_odstate(dp->dl_clnt_odstate);
	nfs4_unlock_deleg_lease(dp);
	nfs4_put_stid(&dp->dl_stid);
}

void nfs4_unhash_stid(struct nfs4_stid *s)
{
	s->sc_type = 0;
}
/**
 * nfs4_delegation_exists - Discover if this delegation already exists
 * @clp:     a pointer to the nfs4_client we're granting a delegation to
 * @fp:      a pointer to the nfs4_file we're granting a delegation on
 *
 * Return:
 *      On success: true iff an existing delegation is found
 */
static bool
nfs4_delegation_exists(struct nfs4_client *clp, struct nfs4_file *fp)
{
	struct nfs4_delegation *searchdp = NULL;
	struct nfs4_client *searchclp = NULL;

	lockdep_assert_held(&state_lock);
	lockdep_assert_held(&fp->fi_lock);

	list_for_each_entry(searchdp, &fp->fi_delegations, dl_perfile) {
		searchclp = searchdp->dl_stid.sc_client;
		if (clp == searchclp) {
			return true;
		}
	}
	return false;
}

/**
 * hash_delegation_locked - Add a delegation to the appropriate lists
 * @dp:     a pointer to the nfs4_delegation we are adding.
 * @fp:     a pointer to the nfs4_file we're granting a delegation on
 *
 * Return:
 *      On success: 0 if the delegation was successfully hashed.
 *
 *      On error: -EAGAIN if one was previously granted to this
 *                 nfs4_client for this nfs4_file. Delegation is not hashed.
 *
 */
static int
hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
{
	struct nfs4_client *clp = dp->dl_stid.sc_client;

	lockdep_assert_held(&state_lock);
	lockdep_assert_held(&fp->fi_lock);

	if (nfs4_delegation_exists(clp, fp))
		return -EAGAIN;
	refcount_inc(&dp->dl_stid.sc_count);
	dp->dl_stid.sc_type = NFS4_DELEG_STID;
	list_add(&dp->dl_perfile, &fp->fi_delegations);
	list_add(&dp->dl_perclnt, &clp->cl_delegations);
	return 0;
}
static bool
unhash_delegation_locked(struct nfs4_delegation *dp)
{
	struct nfs4_file *fp = dp->dl_stid.sc_file;

	lockdep_assert_held(&state_lock);

	if (list_empty(&dp->dl_perfile))
		return false;

	dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID;
	/* Ensure that deleg break won't try to requeue it */
	++dp->dl_time;
	spin_lock(&fp->fi_lock);
	list_del_init(&dp->dl_perclnt);
	list_del_init(&dp->dl_recall_lru);
	list_del_init(&dp->dl_perfile);
	spin_unlock(&fp->fi_lock);
	return true;
}

static void destroy_delegation(struct nfs4_delegation *dp)
{
	bool unhashed;

	spin_lock(&state_lock);
	unhashed = unhash_delegation_locked(dp);
	spin_unlock(&state_lock);
	if (unhashed)
		destroy_unhashed_deleg(dp);
}
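
/*
 * For 4.1+ clients, keep the revoked delegation around so the client can
 * acknowledge it with FREE_STATEID; 4.0 has no such operation, so the
 * delegation is destroyed immediately.
 */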
static void revoke_delegation(struct nfs4_delegation *dp)
{
	struct nfs4_client *clp = dp->dl_stid.sc_client;

	WARN_ON(!list_empty(&dp->dl_recall_lru));

	if (clp->cl_minorversion) {
		dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID;
		refcount_inc(&dp->dl_stid.sc_count);
		spin_lock(&clp->cl_lock);
		list_add(&dp->dl_recall_lru, &clp->cl_revoked);
		spin_unlock(&clp->cl_lock);
	}
	destroy_unhashed_deleg(dp);
}

static unsigned int clientid_hashval(u32 id)
{
	return id & CLIENT_HASH_MASK;
}

static unsigned int clientstr_hashval(struct xdr_netobj name)
{
	return opaque_hashval(name.data, 8) & CLIENT_HASH_MASK;
}
/*
 * We store the NONE, READ, WRITE, and BOTH bits separately in the
 * st_{access,deny}_bmap field of the stateid, in order to track not
 * only what share bits are currently in force, but also what
 * combinations of share bits previous opens have used.  This allows us
 * to enforce the recommendation of rfc 3530 14.2.19 that the server
 * return an error if the client attempts to downgrade to a combination
 * of share bits not explicable by closing some of its previous opens.
 *
 * XXX: This enforcement is actually incomplete, since we don't keep
 * track of access/deny bit combinations; so, e.g., we allow:
 *
 *	OPEN allow read, deny write
 *	OPEN allow both, deny none
 *	DOWNGRADE allow read, deny none
 *
 * which we should reject.
 */
static unsigned int
bmap_to_share_mode(unsigned long bmap)
{
	int i;
	unsigned int access = 0;

	for (i = 1; i < 4; i++) {
		if (test_bit(i, &bmap))
			access |= i;
	}
	return access;
}

/* set share access for a given stateid */
static inline void
set_access(u32 access, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << access;

	WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
	stp->st_access_bmap |= mask;
}

/* clear share access for a given stateid */
static inline void
clear_access(u32 access, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << access;

	WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
	stp->st_access_bmap &= ~mask;
}

/* test whether a given stateid has access */
static inline bool
test_access(u32 access, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << access;

	return (bool)(stp->st_access_bmap & mask);
}

/* set share deny for a given stateid */
static inline void
set_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << deny;

	WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
	stp->st_deny_bmap |= mask;
}

/* clear share deny for a given stateid */
static inline void
clear_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << deny;

	WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
	stp->st_deny_bmap &= ~mask;
}

/* test whether a given stateid is denying specific access */
static inline bool
test_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << deny;

	return (bool)(stp->st_deny_bmap & mask);
}
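
/* Map NFSv4 share-access bits to an open(2)-style access mode. */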
static int nfs4_access_to_omode(u32 access)
{
	switch (access & NFS4_SHARE_ACCESS_BOTH) {
	case NFS4_SHARE_ACCESS_READ:
		return O_RDONLY;
	case NFS4_SHARE_ACCESS_WRITE:
		return O_WRONLY;
	case NFS4_SHARE_ACCESS_BOTH:
		return O_RDWR;
	}
	WARN_ON_ONCE(1);
	return O_RDONLY;
}

/*
 * A stateid that had a deny mode associated with it is being released
 * or downgraded. Recalculate the deny mode on the file.
 */
static void
recalculate_deny_mode(struct nfs4_file *fp)
{
	struct nfs4_ol_stateid *stp;

	spin_lock(&fp->fi_lock);
	fp->fi_share_deny = 0;
	list_for_each_entry(stp, &fp->fi_stateids, st_perfile)
		fp->fi_share_deny |= bmap_to_share_mode(stp->st_deny_bmap);
	spin_unlock(&fp->fi_lock);
}

static void
reset_union_bmap_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	int i;
	bool change = false;

	for (i = 1; i < 4; i++) {
		if ((i & deny) != i) {
			change = true;
			clear_deny(i, stp);
		}
	}

	/* Recalculate per-file deny mode if there was a change */
	if (change)
		recalculate_deny_mode(stp->st_stid.sc_file);
}

/* release all access and file references for a given stateid */
static void
release_all_access(struct nfs4_ol_stateid *stp)
{
	int i;
	struct nfs4_file *fp = stp->st_stid.sc_file;

	if (fp && stp->st_deny_bmap != 0)
		recalculate_deny_mode(fp);

	for (i = 1; i < 4; i++) {
		if (test_access(i, stp))
			nfs4_file_put_access(stp->st_stid.sc_file, i);
		clear_access(i, stp);
	}
}
static inline void nfs4_free_stateowner(struct nfs4_stateowner *sop)
{
	kfree(sop->so_owner.data);
	sop->so_ops->so_free(sop);
}

static void nfs4_put_stateowner(struct nfs4_stateowner *sop)
{
	struct nfs4_client *clp = sop->so_client;

	might_lock(&clp->cl_lock);

	if (!atomic_dec_and_lock(&sop->so_count, &clp->cl_lock))
		return;
	sop->so_ops->so_unhash(sop);
	spin_unlock(&clp->cl_lock);
	nfs4_free_stateowner(sop);
}

static bool
nfs4_ol_stateid_unhashed(const struct nfs4_ol_stateid *stp)
{
	return list_empty(&stp->st_perfile);
}

static bool unhash_ol_stateid(struct nfs4_ol_stateid *stp)
{
	struct nfs4_file *fp = stp->st_stid.sc_file;

	lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock);

	if (list_empty(&stp->st_perfile))
		return false;

	spin_lock(&fp->fi_lock);
	list_del_init(&stp->st_perfile);
	spin_unlock(&fp->fi_lock);
	list_del(&stp->st_perstateowner);
	return true;
}

static void nfs4_free_ol_stateid(struct nfs4_stid *stid)
{
	struct nfs4_ol_stateid *stp = openlockstateid(stid);

	put_clnt_odstate(stp->st_clnt_odstate);
	release_all_access(stp);
	if (stp->st_stateowner)
		nfs4_put_stateowner(stp->st_stateowner);
	kmem_cache_free(stateid_slab, stid);
}
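
/*
 * Releasing a lock stateid must also release any posix locks still held
 * by this lockowner, hence the filp_close() before the stateid is freed.
 */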
static void nfs4_free_lock_stateid(struct nfs4_stid *stid)
{
	struct nfs4_ol_stateid *stp = openlockstateid(stid);
	struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
	struct nfsd_file *nf;

	nf = find_any_file(stp->st_stid.sc_file);
	if (nf) {
		get_file(nf->nf_file);
		filp_close(nf->nf_file, (fl_owner_t)lo);
		nfsd_file_put(nf);
	}
	nfs4_free_ol_stateid(stid);
}

/*
 * Put the persistent reference to an already unhashed generic stateid, while
 * holding the cl_lock. If it's the last reference, then put it onto the
 * reaplist for later destruction.
 */
static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,
				       struct list_head *reaplist)
{
	struct nfs4_stid *s = &stp->st_stid;
	struct nfs4_client *clp = s->sc_client;

	lockdep_assert_held(&clp->cl_lock);

	WARN_ON_ONCE(!list_empty(&stp->st_locks));

	if (!refcount_dec_and_test(&s->sc_count)) {
		wake_up_all(&close_wq);
		return;
	}

	idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
	list_add(&stp->st_locks, reaplist);
}

static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
{
	lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);

	if (!unhash_ol_stateid(stp))
		return false;
	list_del_init(&stp->st_locks);
	nfs4_unhash_stid(&stp->st_stid);
	return true;
}
static void release_lock_stateid(struct nfs4_ol_stateid *stp)
{
	struct nfs4_client *clp = stp->st_stid.sc_client;
	bool unhashed;

	spin_lock(&clp->cl_lock);
	unhashed = unhash_lock_stateid(stp);
	spin_unlock(&clp->cl_lock);
	if (unhashed)
		nfs4_put_stid(&stp->st_stid);
}

static void unhash_lockowner_locked(struct nfs4_lockowner *lo)
{
	struct nfs4_client *clp = lo->lo_owner.so_client;

	lockdep_assert_held(&clp->cl_lock);

	list_del_init(&lo->lo_owner.so_strhash);
}

/*
 * Free a list of generic stateids that were collected earlier after being
 * fully unhashed:
 */
static void
free_ol_stateid_reaplist(struct list_head *reaplist)
{
	struct nfs4_ol_stateid *stp;
	struct nfs4_file *fp;

	might_sleep();

	while (!list_empty(reaplist)) {
		stp = list_first_entry(reaplist, struct nfs4_ol_stateid,
				       st_locks);
		list_del(&stp->st_locks);
		fp = stp->st_stid.sc_file;
		stp->st_stid.sc_free(&stp->st_stid);
		if (fp)
			put_nfs4_file(fp);
	}
}

static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp,
				       struct list_head *reaplist)
{
	struct nfs4_ol_stateid *stp;

	lockdep_assert_held(&open_stp->st_stid.sc_client->cl_lock);

	while (!list_empty(&open_stp->st_locks)) {
		stp = list_entry(open_stp->st_locks.next,
				struct nfs4_ol_stateid, st_locks);
		WARN_ON(!unhash_lock_stateid(stp));
		put_ol_stateid_locked(stp, reaplist);
	}
}
static bool unhash_open_stateid(struct nfs4_ol_stateid *stp,
				struct list_head *reaplist)
{
	lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);

	if (!unhash_ol_stateid(stp))
		return false;
	release_open_stateid_locks(stp, reaplist);
	return true;
}

static void release_open_stateid(struct nfs4_ol_stateid *stp)
{
	LIST_HEAD(reaplist);

	spin_lock(&stp->st_stid.sc_client->cl_lock);
	if (unhash_open_stateid(stp, &reaplist))
		put_ol_stateid_locked(stp, &reaplist);
	spin_unlock(&stp->st_stid.sc_client->cl_lock);
	free_ol_stateid_reaplist(&reaplist);
}

static void unhash_openowner_locked(struct nfs4_openowner *oo)
{
	struct nfs4_client *clp = oo->oo_owner.so_client;

	lockdep_assert_held(&clp->cl_lock);

	list_del_init(&oo->oo_owner.so_strhash);
	list_del_init(&oo->oo_perclient);
}

static void release_last_closed_stateid(struct nfs4_openowner *oo)
{
	struct nfsd_net *nn = net_generic(oo->oo_owner.so_client->net,
					  nfsd_net_id);
	struct nfs4_ol_stateid *s;

	spin_lock(&nn->client_lock);
	s = oo->oo_last_closed_stid;
	if (s) {
		list_del_init(&oo->oo_close_lru);
		oo->oo_last_closed_stid = NULL;
	}
	spin_unlock(&nn->client_lock);
	if (s)
		nfs4_put_stid(&s->st_stid);
}

static void release_openowner(struct nfs4_openowner *oo)
{
	struct nfs4_ol_stateid *stp;
	struct nfs4_client *clp = oo->oo_owner.so_client;
	struct list_head reaplist;

	INIT_LIST_HEAD(&reaplist);

	spin_lock(&clp->cl_lock);
	unhash_openowner_locked(oo);
	while (!list_empty(&oo->oo_owner.so_stateids)) {
		stp = list_first_entry(&oo->oo_owner.so_stateids,
				struct nfs4_ol_stateid, st_perstateowner);
		if (unhash_open_stateid(stp, &reaplist))
			put_ol_stateid_locked(stp, &reaplist);
	}
	spin_unlock(&clp->cl_lock);
	free_ol_stateid_reaplist(&reaplist);
	release_last_closed_stateid(oo);
	nfs4_put_stateowner(&oo->oo_owner);
}
static inline int
hash_sessionid(struct nfs4_sessionid *sessionid)
{
	struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid;

	return sid->sequence % SESSION_HASH_SIZE;
}

#ifdef CONFIG_SUNRPC_DEBUG
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
	u32 *ptr = (u32 *)(&sessionid->data[0]);
	dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]);
}
#else
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
}
#endif

/*
 * Bump the seqid on cstate->replay_owner, and clear replay_owner if it
 * won't be used for replay.
 */
void nfsd4_bump_seqid(struct nfsd4_compound_state *cstate, __be32 nfserr)
{
	struct nfs4_stateowner *so = cstate->replay_owner;

	if (nfserr == nfserr_replay_me)
		return;

	if (!seqid_mutating_err(ntohl(nfserr))) {
		nfsd4_cstate_clear_replay(cstate);
		return;
	}
	if (!so)
		return;
	if (so->so_is_open_owner)
		release_last_closed_stateid(openowner(so));
	so->so_seqid++;
}
static void
gen_sessionid(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd4_sessionid *sid;

	sid = (struct nfsd4_sessionid *)ses->se_sessionid.data;
	sid->clientid = clp->cl_clientid;
	sid->sequence = current_sessionid++;
	sid->reserved = 0;
}

/*
 * The protocol defines ca_maxresponsesize_cached to include the size of
 * the rpc header, but all we need to cache is the data starting after
 * the end of the initial SEQUENCE operation--the rest we regenerate
 * each time.  Therefore we can advertise a ca_maxresponsesize_cached
 * value that is the number of bytes in our cache plus a few additional
 * bytes.  In order to stay on the safe side, and not promise more than
 * we can cache, those additional bytes must be the minimum possible: 24
 * bytes of rpc header (xid through accept state, with AUTH_NULL
 * verifier), 12 for the compound header (with zero-length tag), and 44
 * for the SEQUENCE op response:
 */
#define NFSD_MIN_HDR_SEQ_SZ  (24 + 12 + 44)
static void
free_session_slots(struct nfsd4_session *ses)
{
	int i;

	for (i = 0; i < ses->se_fchannel.maxreqs; i++) {
		free_svc_cred(&ses->se_slots[i]->sl_cred);
		kfree(ses->se_slots[i]);
	}
}

/*
 * We don't actually need to cache the rpc and session headers, so we
 * can allocate a little less for each slot:
 */
static inline u32 slot_bytes(struct nfsd4_channel_attrs *ca)
{
	u32 size;

	if (ca->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ)
		size = 0;
	else
		size = ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
	return size + sizeof(struct nfsd4_slot);
}
/*
 * XXX: If we run out of reserved DRC memory we could (up to a point)
 * re-negotiate active sessions and reduce their slot usage to make
 * room for new connections. For now we just fail the create session.
 */
static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
{
	u32 slotsize = slot_bytes(ca);
	u32 num = ca->maxreqs;
	unsigned long avail, total_avail;
	unsigned int scale_factor;

	spin_lock(&nfsd_drc_lock);
	if (nfsd_drc_max_mem > nfsd_drc_mem_used)
		total_avail = nfsd_drc_max_mem - nfsd_drc_mem_used;
	else
		/* We have handed out more space than we chose in
		 * set_max_drc() to allow.  That isn't really a
		 * problem as long as that doesn't make us think we
		 * have lots more due to integer overflow.
		 */
		total_avail = 0;
	avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION, total_avail);
	/*
	 * Never use more than a fraction of the remaining memory,
	 * unless it's the only way to give this client a slot.
	 * The chosen fraction is either 1/8 or 1/number of threads,
	 * whichever is smaller.  This ensures there are adequate
	 * slots to support multiple clients per thread.
	 * Give the client one slot even if that would require
	 * over-allocation--it is better than failure.
	 */
	scale_factor = max_t(unsigned int, 8, nn->nfsd_serv->sv_nrthreads);

	avail = clamp_t(unsigned long, avail, slotsize,
			total_avail/scale_factor);
	num = min_t(int, num, avail / slotsize);
	num = max_t(int, num, 1);
	nfsd_drc_mem_used += num * slotsize;
	spin_unlock(&nfsd_drc_lock);

	return num;
}

static void nfsd4_put_drc_mem(struct nfsd4_channel_attrs *ca)
{
	int slotsize = slot_bytes(ca);

	spin_lock(&nfsd_drc_lock);
	nfsd_drc_mem_used -= slotsize * ca->maxreqs;
	spin_unlock(&nfsd_drc_lock);
}
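
/*
 * The session and its slot-pointer array are allocated as one kzalloc;
 * each slot (header plus reply cache) is then allocated separately at
 * slot_bytes() granularity.
 */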
static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs,
					   struct nfsd4_channel_attrs *battrs)
{
	int numslots = fattrs->maxreqs;
	int slotsize = slot_bytes(fattrs);
	struct nfsd4_session *new;
	int mem, i;

	BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot *)
			+ sizeof(struct nfsd4_session) > PAGE_SIZE);
	mem = numslots * sizeof(struct nfsd4_slot *);

	new = kzalloc(sizeof(*new) + mem, GFP_KERNEL);
	if (!new)
		return NULL;
	/* allocate each struct nfsd4_slot and data cache in one piece */
	for (i = 0; i < numslots; i++) {
		new->se_slots[i] = kzalloc(slotsize, GFP_KERNEL);
		if (!new->se_slots[i])
			goto out_free;
	}

	memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs));
	memcpy(&new->se_bchannel, battrs, sizeof(struct nfsd4_channel_attrs));

	return new;
out_free:
	while (i--)
		kfree(new->se_slots[i]);
	kfree(new);
	return NULL;
}
static void free_conn(struct nfsd4_conn *c)
{
	svc_xprt_put(c->cn_xprt);
	kfree(c);
}

static void nfsd4_conn_lost(struct svc_xpt_user *u)
{
	struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user);
	struct nfs4_client *clp = c->cn_session->se_client;

	spin_lock(&clp->cl_lock);
	if (!list_empty(&c->cn_persession)) {
		list_del(&c->cn_persession);
		free_conn(c);
	}
	nfsd4_probe_callback(clp);
	spin_unlock(&clp->cl_lock);
}

static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
{
	struct nfsd4_conn *conn;

	conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL);
	if (!conn)
		return NULL;
	svc_xprt_get(rqstp->rq_xprt);
	conn->cn_xprt = rqstp->rq_xprt;
	conn->cn_flags = flags;
	INIT_LIST_HEAD(&conn->cn_xpt_user.list);
	return conn;
}

static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	conn->cn_session = ses;
	list_add(&conn->cn_persession, &ses->se_conns);
}

static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;

	spin_lock(&clp->cl_lock);
	__nfsd4_hash_conn(conn, ses);
	spin_unlock(&clp->cl_lock);
}

static int nfsd4_register_conn(struct nfsd4_conn *conn)
{
	conn->cn_xpt_user.callback = nfsd4_conn_lost;
	return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
}
static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	int ret;

	nfsd4_hash_conn(conn, ses);
	ret = nfsd4_register_conn(conn);
	if (ret)
		/* oops; xprt is already down: */
		nfsd4_conn_lost(&conn->cn_xpt_user);
	/* We may have gained or lost a callback channel: */
	nfsd4_probe_callback_sync(ses->se_client);
}

static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses)
{
	u32 dir = NFS4_CDFC4_FORE;

	if (cses->flags & SESSION4_BACK_CHAN)
		dir |= NFS4_CDFC4_BACK;
	return alloc_conn(rqstp, dir);
}

/* must be called under client_lock */
static void nfsd4_del_conns(struct nfsd4_session *s)
{
	struct nfs4_client *clp = s->se_client;
	struct nfsd4_conn *c;

	spin_lock(&clp->cl_lock);
	while (!list_empty(&s->se_conns)) {
		c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession);
		list_del_init(&c->cn_persession);
		spin_unlock(&clp->cl_lock);

		unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user);
		free_conn(c);

		spin_lock(&clp->cl_lock);
	}
	spin_unlock(&clp->cl_lock);
}

static void __free_session(struct nfsd4_session *ses)
{
	free_session_slots(ses);
	kfree(ses);
}

static void free_session(struct nfsd4_session *ses)
{
	nfsd4_del_conns(ses);
	nfsd4_put_drc_mem(&ses->se_fchannel);
	__free_session(ses);
}
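
/* Wire a newly allocated session to its client and hash it for lookup. */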
static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses)
{
	int idx;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	new->se_client = clp;
	gen_sessionid(new);

	INIT_LIST_HEAD(&new->se_conns);

	new->se_cb_seq_nr = 1;
	new->se_flags = cses->flags;
	new->se_cb_prog = cses->callback_prog;
	new->se_cb_sec = cses->cb_sec;
	atomic_set(&new->se_ref, 0);
	idx = hash_sessionid(&new->se_sessionid);
	list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]);
	spin_lock(&clp->cl_lock);
	list_add(&new->se_perclnt, &clp->cl_sessions);
	spin_unlock(&clp->cl_lock);

	{
		struct sockaddr *sa = svc_addr(rqstp);
		/*
		 * This is a little silly; with sessions there's no real
		 * use for the callback address.  Use the peer address
		 * as a reasonable default for now, but consider fixing
		 * the rpc client not to require an address in the
		 * future:
		 */
		rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
		clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
	}
}
/* caller must hold client_lock */
static struct nfsd4_session *
__find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net)
{
	struct nfsd4_session *elem;
	int idx;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	dump_sessionid(__func__, sessionid);
	idx = hash_sessionid(sessionid);
	/* Search in the appropriate list */
	list_for_each_entry(elem, &nn->sessionid_hashtbl[idx], se_hash) {
		if (!memcmp(elem->se_sessionid.data, sessionid->data,
			    NFS4_MAX_SESSIONID_LEN)) {
			return elem;
		}
	}

	dprintk("%s: session not found\n", __func__);
	return NULL;
}

static struct nfsd4_session *
find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net,
		__be32 *ret)
{
	struct nfsd4_session *session;
	__be32 status = nfserr_badsession;

	session = __find_in_sessionid_hashtbl(sessionid, net);
	if (!session)
		goto out;
	status = nfsd4_get_session_locked(session);
	if (status)
		session = NULL;
out:
	*ret = status;
	return session;
}

/* caller must hold client_lock */
static void
unhash_session(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	list_del(&ses->se_hash);
	spin_lock(&ses->se_client->cl_lock);
	list_del(&ses->se_perclnt);
	spin_unlock(&ses->se_client->cl_lock);
}
/* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
static bool
STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn)
{
	/*
	 * We're assuming the clid was not given out from a boot
	 * precisely 2^32 (about 136 years) before this one.  That seems
	 * a safe assumption:
	 */
	if (clid->cl_boot == (u32)nn->boot_time)
		return false;
	trace_nfsd_clid_stale(clid);
	return true;
}
/*
 * XXX Should we use a slab cache ?
 * This type of memory management is somewhat inefficient, but we use it
 * anyway since SETCLIENTID is not a common operation.
 */
static struct nfs4_client *alloc_client(struct xdr_netobj name)
{
	struct nfs4_client *clp;
	int i;

	clp = kmem_cache_zalloc(client_slab, GFP_KERNEL);
	if (clp == NULL)
		return NULL;
	xdr_netobj_dup(&clp->cl_name, &name, GFP_KERNEL);
	if (clp->cl_name.data == NULL)
		goto err_no_name;
	clp->cl_ownerstr_hashtbl = kmalloc_array(OWNER_HASH_SIZE,
						 sizeof(struct list_head),
						 GFP_KERNEL);
	if (!clp->cl_ownerstr_hashtbl)
		goto err_no_hashtbl;
	for (i = 0; i < OWNER_HASH_SIZE; i++)
		INIT_LIST_HEAD(&clp->cl_ownerstr_hashtbl[i]);
	INIT_LIST_HEAD(&clp->cl_sessions);
	idr_init(&clp->cl_stateids);
	atomic_set(&clp->cl_rpc_users, 0);
	clp->cl_cb_state = NFSD4_CB_UNKNOWN;
	INIT_LIST_HEAD(&clp->cl_idhash);
	INIT_LIST_HEAD(&clp->cl_openowners);
	INIT_LIST_HEAD(&clp->cl_delegations);
	INIT_LIST_HEAD(&clp->cl_lru);
	INIT_LIST_HEAD(&clp->cl_revoked);
#ifdef CONFIG_NFSD_PNFS
	INIT_LIST_HEAD(&clp->cl_lo_states);
#endif
	INIT_LIST_HEAD(&clp->async_copies);
	spin_lock_init(&clp->async_lock);
	spin_lock_init(&clp->cl_lock);
	rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
	return clp;
err_no_hashtbl:
	kfree(clp->cl_name.data);
err_no_name:
	kmem_cache_free(client_slab, clp);
	return NULL;
}
static void __free_client(struct kref *k)
{
	struct nfsdfs_client *c = container_of(k, struct nfsdfs_client, cl_ref);
	struct nfs4_client *clp = container_of(c, struct nfs4_client, cl_nfsdfs);

	free_svc_cred(&clp->cl_cred);
	kfree(clp->cl_ownerstr_hashtbl);
	kfree(clp->cl_name.data);
	kfree(clp->cl_nii_domain.data);
	kfree(clp->cl_nii_name.data);
	idr_destroy(&clp->cl_stateids);
	kmem_cache_free(client_slab, clp);
}

static void drop_client(struct nfs4_client *clp)
{
	kref_put(&clp->cl_nfsdfs.cl_ref, __free_client);
}

static void
free_client(struct nfs4_client *clp)
{
	while (!list_empty(&clp->cl_sessions)) {
		struct nfsd4_session *ses;
		ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
				se_perclnt);
		list_del(&ses->se_perclnt);
		WARN_ON_ONCE(atomic_read(&ses->se_ref));
		free_session(ses);
	}
	rpc_destroy_wait_queue(&clp->cl_cb_waitq);
	if (clp->cl_nfsd_dentry) {
		nfsd_client_rmdir(clp->cl_nfsd_dentry);
		clp->cl_nfsd_dentry = NULL;
		wake_up_all(&expiry_wq);
	}
	drop_client(clp);
}
/* must be called under the client_lock */
static void
unhash_client_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
	struct nfsd4_session *ses;

	lockdep_assert_held(&nn->client_lock);

	/* Mark the client as expired! */
	clp->cl_time = 0;
	/* Make it invisible */
	if (!list_empty(&clp->cl_idhash)) {
		list_del_init(&clp->cl_idhash);
		if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
			rb_erase(&clp->cl_namenode, &nn->conf_name_tree);
		else
			rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
	}
	list_del_init(&clp->cl_lru);
	spin_lock(&clp->cl_lock);
	list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
		list_del_init(&ses->se_hash);
	spin_unlock(&clp->cl_lock);
}

static void
unhash_client(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	spin_lock(&nn->client_lock);
	unhash_client_locked(clp);
	spin_unlock(&nn->client_lock);
}

static __be32 mark_client_expired_locked(struct nfs4_client *clp)
{
	if (atomic_read(&clp->cl_rpc_users))
		return nfserr_jukebox;
	unhash_client_locked(clp);
	return nfs_ok;
}
static void
__destroy_client(struct nfs4_client *clp)
{
	int i;
	struct nfs4_openowner *oo;
	struct nfs4_delegation *dp;
	struct list_head reaplist;

	INIT_LIST_HEAD(&reaplist);
	spin_lock(&state_lock);
	while (!list_empty(&clp->cl_delegations)) {
		dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
		WARN_ON(!unhash_delegation_locked(dp));
		list_add(&dp->dl_recall_lru, &reaplist);
	}
	spin_unlock(&state_lock);
	while (!list_empty(&reaplist)) {
		dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
		list_del_init(&dp->dl_recall_lru);
		destroy_unhashed_deleg(dp);
	}
	while (!list_empty(&clp->cl_revoked)) {
		dp = list_entry(clp->cl_revoked.next, struct nfs4_delegation, dl_recall_lru);
		list_del_init(&dp->dl_recall_lru);
		nfs4_put_stid(&dp->dl_stid);
	}
	while (!list_empty(&clp->cl_openowners)) {
		oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
		nfs4_get_stateowner(&oo->oo_owner);
		release_openowner(oo);
	}
	for (i = 0; i < OWNER_HASH_SIZE; i++) {
		struct nfs4_stateowner *so, *tmp;

		list_for_each_entry_safe(so, tmp, &clp->cl_ownerstr_hashtbl[i],
					 so_strhash) {
			/* Should be no openowners at this point */
			WARN_ON_ONCE(so->so_is_open_owner);
			remove_blocked_locks(lockowner(so));
		}
	}
	nfsd4_return_all_client_layouts(clp);
	nfsd4_shutdown_copy(clp);
	nfsd4_shutdown_callback(clp);
	if (clp->cl_cb_conn.cb_xprt)
		svc_xprt_put(clp->cl_cb_conn.cb_xprt);
	free_client(clp);
	wake_up_all(&expiry_wq);
}

static void
destroy_client(struct nfs4_client *clp)
{
	unhash_client(clp);
	__destroy_client(clp);
}

static void inc_reclaim_complete(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	if (!nn->track_reclaim_completes)
		return;
	if (!nfsd4_find_reclaim_client(clp->cl_name, nn))
		return;
	if (atomic_inc_return(&nn->nr_reclaim_complete) ==
			nn->reclaim_str_hashtbl_size) {
		printk(KERN_INFO "NFSD: all clients done reclaiming, ending NFSv4 grace period (net %x)\n",
				clp->net->ns.inum);
		nfsd4_end_grace(nn);
	}
}

static void expire_client(struct nfs4_client *clp)
{
	unhash_client(clp);
	nfsd4_client_record_remove(clp);
	__destroy_client(clp);
}
static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
{
	memcpy(target->cl_verifier.data, source->data,
			sizeof(target->cl_verifier.data));
}

static void copy_clid(struct nfs4_client *target, struct nfs4_client *source)
{
	target->cl_clientid.cl_boot = source->cl_clientid.cl_boot;
	target->cl_clientid.cl_id = source->cl_clientid.cl_id;
}

static int copy_cred(struct svc_cred *target, struct svc_cred *source)
{
	target->cr_principal = kstrdup(source->cr_principal, GFP_KERNEL);
	target->cr_raw_principal = kstrdup(source->cr_raw_principal,
								GFP_KERNEL);
	target->cr_targ_princ = kstrdup(source->cr_targ_princ, GFP_KERNEL);
	if ((source->cr_principal && !target->cr_principal) ||
	    (source->cr_raw_principal && !target->cr_raw_principal) ||
	    (source->cr_targ_princ && !target->cr_targ_princ))
		return -ENOMEM;

	target->cr_flavor = source->cr_flavor;
	target->cr_uid = source->cr_uid;
	target->cr_gid = source->cr_gid;
	target->cr_group_info = source->cr_group_info;
	get_group_info(target->cr_group_info);
	target->cr_gss_mech = source->cr_gss_mech;
	if (source->cr_gss_mech)
		gss_mech_get(source->cr_gss_mech);
	return 0;
}
static int
compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2)
{
	if (o1->len < o2->len)
		return -1;
	if (o1->len > o2->len)
		return 1;
	return memcmp(o1->data, o2->data, o1->len);
}

static bool
same_verf(nfs4_verifier *v1, nfs4_verifier *v2)
{
	return 0 == memcmp(v1->data, v2->data, sizeof(v1->data));
}

static bool
same_clid(clientid_t *cl1, clientid_t *cl2)
{
	return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id);
}

static bool groups_equal(struct group_info *g1, struct group_info *g2)
{
	int i;

	if (g1->ngroups != g2->ngroups)
		return false;
	for (i = 0; i < g1->ngroups; i++)
		if (!gid_eq(g1->gid[i], g2->gid[i]))
			return false;
	return true;
}
/*
 * RFC 3530 language requires clid_inuse be returned when the
 * "principal" associated with a request differs from that previously
 * used.  We use uid, gid's, and gss principal string as our best
 * approximation.  We also don't want to allow non-gss use of a client
 * established using gss: in theory cr_principal should catch that
 * change, but in practice cr_principal can be null even in the gss case
 * since gssd doesn't always pass down a principal string.
 */
static bool is_gss_cred(struct svc_cred *cr)
{
	/* Is cr_flavor one of the gss "pseudoflavors"?: */
	return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR);
}

static bool
same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
{
	if ((is_gss_cred(cr1) != is_gss_cred(cr2))
		|| (!uid_eq(cr1->cr_uid, cr2->cr_uid))
		|| (!gid_eq(cr1->cr_gid, cr2->cr_gid))
		|| !groups_equal(cr1->cr_group_info, cr2->cr_group_info))
		return false;
	/* XXX: check that cr_targ_princ fields match ? */
	if (cr1->cr_principal == cr2->cr_principal)
		return true;
	if (!cr1->cr_principal || !cr2->cr_principal)
		return false;
	return 0 == strcmp(cr1->cr_principal, cr2->cr_principal);
}
static bool svc_rqst_integrity_protected(struct svc_rqst *rqstp)
{
	struct svc_cred *cr = &rqstp->rq_cred;
	u32 service;

	if (!cr->cr_gss_mech)
		return false;
	service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor);
	return service == RPC_GSS_SVC_INTEGRITY ||
	       service == RPC_GSS_SVC_PRIVACY;
}

bool nfsd4_mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp)
{
	struct svc_cred *cr = &rqstp->rq_cred;

	if (!cl->cl_mach_cred)
		return true;
	if (cl->cl_cred.cr_gss_mech != cr->cr_gss_mech)
		return false;
	if (!svc_rqst_integrity_protected(rqstp))
		return false;
	if (cl->cl_cred.cr_raw_principal)
		return 0 == strcmp(cl->cl_cred.cr_raw_principal,
						cr->cr_raw_principal);
	if (!cr->cr_principal)
		return false;
	return 0 == strcmp(cl->cl_cred.cr_principal, cr->cr_principal);
}
static void gen_confirm(struct nfs4_client *clp, struct nfsd_net *nn)
{
	__be32 verf[2];

	/*
	 * This is opaque to client, so no need to byte-swap. Use
	 * __force to keep sparse happy
	 */
	verf[0] = (__force __be32)(u32)ktime_get_real_seconds();
	verf[1] = (__force __be32)nn->clverifier_counter++;
	memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data));
}
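/*
 * Roughly: the confirm verifier is <wall-clock seconds, per-net counter>,
 * so it changes on every call and, barring clock rollback, differs across
 * server reboots as well, which appears to be all the protocol needs
 * from it.
 */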
static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn)
{
	clp->cl_clientid.cl_boot = (u32)nn->boot_time;
	clp->cl_clientid.cl_id = nn->clientid_counter++;
	gen_confirm(clp, nn);
}

static struct nfs4_stid *
find_stateid_locked(struct nfs4_client *cl, stateid_t *t)
{
	struct nfs4_stid *ret;

	ret = idr_find(&cl->cl_stateids, t->si_opaque.so_id);
	if (!ret || !ret->sc_type)
		return NULL;
	return ret;
}
static struct nfs4_stid *
find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask)
{
	struct nfs4_stid *s;

	spin_lock(&cl->cl_lock);
	s = find_stateid_locked(cl, t);
	if (s != NULL) {
		if (typemask & s->sc_type)
			refcount_inc(&s->sc_count);
		else
			s = NULL;
	}
	spin_unlock(&cl->cl_lock);
	return s;
}

static struct nfs4_client *get_nfsdfs_clp(struct inode *inode)
{
	struct nfsdfs_client *nc;

	nc = get_nfsdfs_client(inode);
	if (!nc)
		return NULL;
	return container_of(nc, struct nfs4_client, cl_nfsdfs);
}

static void seq_quote_mem(struct seq_file *m, char *data, int len)
{
	seq_printf(m, "\"");
	seq_escape_mem_ascii(m, data, len);
	seq_printf(m, "\"");
}
static int client_info_show(struct seq_file *m, void *v)
{
	struct inode *inode = m->private;
	struct nfs4_client *clp;
	u64 clid;

	clp = get_nfsdfs_clp(inode);
	if (!clp)
		return -ENXIO;
	memcpy(&clid, &clp->cl_clientid, sizeof(clid));
	seq_printf(m, "clientid: 0x%llx\n", clid);
	seq_printf(m, "address: \"%pISpc\"\n", (struct sockaddr *)&clp->cl_addr);
	seq_printf(m, "name: ");
	seq_quote_mem(m, clp->cl_name.data, clp->cl_name.len);
	seq_printf(m, "\nminor version: %d\n", clp->cl_minorversion);
	if (clp->cl_nii_domain.data) {
		seq_printf(m, "Implementation domain: ");
		seq_quote_mem(m, clp->cl_nii_domain.data,
					clp->cl_nii_domain.len);
		seq_printf(m, "\nImplementation name: ");
		seq_quote_mem(m, clp->cl_nii_name.data, clp->cl_nii_name.len);
		seq_printf(m, "\nImplementation time: [%lld, %ld]\n",
			clp->cl_nii_time.tv_sec, clp->cl_nii_time.tv_nsec);
	}
	drop_client(clp);

	return 0;
}

static int client_info_open(struct inode *inode, struct file *file)
{
	return single_open(file, client_info_show, inode);
}

static const struct file_operations client_info_fops = {
	.open		= client_info_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static void *states_start(struct seq_file *s, loff_t *pos)
	__acquires(&clp->cl_lock)
{
	struct nfs4_client *clp = s->private;
	unsigned long id = *pos;
	void *ret;

	spin_lock(&clp->cl_lock);
	ret = idr_get_next_ul(&clp->cl_stateids, &id);
	*pos = id;
	return ret;
}

static void *states_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct nfs4_client *clp = s->private;
	unsigned long id = *pos;
	void *ret;

	id++;
	ret = idr_get_next_ul(&clp->cl_stateids, &id);
	*pos = id;
	return ret;
}

static void states_stop(struct seq_file *s, void *v)
	__releases(&clp->cl_lock)
{
	struct nfs4_client *clp = s->private;

	spin_unlock(&clp->cl_lock);
}
static void nfs4_show_fname(struct seq_file *s, struct nfsd_file *f)
{
	seq_printf(s, "filename: \"%pD2\"", f->nf_file);
}

static void nfs4_show_superblock(struct seq_file *s, struct nfsd_file *f)
{
	struct inode *inode = f->nf_inode;

	seq_printf(s, "superblock: \"%02x:%02x:%ld\"",
					MAJOR(inode->i_sb->s_dev),
					MINOR(inode->i_sb->s_dev),
					inode->i_ino);
}

static void nfs4_show_owner(struct seq_file *s, struct nfs4_stateowner *oo)
{
	seq_printf(s, "owner: ");
	seq_quote_mem(s, oo->so_owner.data, oo->so_owner.len);
}

static void nfs4_show_stateid(struct seq_file *s, stateid_t *stid)
{
	seq_printf(s, "0x%.8x", stid->si_generation);
	seq_printf(s, "%12phN", &stid->si_opaque);
}
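/*
 * A stateid is thus rendered as 8 hex digits of generation followed by the
 * 12 opaque bytes in hex: 16 bytes total, mirroring the wire format's
 * (seqid, other) split of stateid4.
 */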
static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st)
{
	struct nfs4_ol_stateid *ols;
	struct nfs4_file *nf;
	struct nfsd_file *file;
	struct nfs4_stateowner *oo;
	unsigned int access, deny;

	if (st->sc_type != NFS4_OPEN_STID && st->sc_type != NFS4_LOCK_STID)
		return 0; /* XXX: or SEQ_SKIP? */
	ols = openlockstateid(st);
	oo = ols->st_stateowner;
	nf = st->sc_file;
	file = find_any_file(nf);
	if (!file)
		return 0;

	seq_printf(s, "- ");
	nfs4_show_stateid(s, &st->sc_stateid);
	seq_printf(s, ": { type: open, ");

	access = bmap_to_share_mode(ols->st_access_bmap);
	deny   = bmap_to_share_mode(ols->st_deny_bmap);

	seq_printf(s, "access: %s%s, ",
		access & NFS4_SHARE_ACCESS_READ ? "r" : "-",
		access & NFS4_SHARE_ACCESS_WRITE ? "w" : "-");
	seq_printf(s, "deny: %s%s, ",
		deny & NFS4_SHARE_ACCESS_READ ? "r" : "-",
		deny & NFS4_SHARE_ACCESS_WRITE ? "w" : "-");

	nfs4_show_superblock(s, file);
	seq_printf(s, ", ");
	nfs4_show_fname(s, file);
	seq_printf(s, ", ");
	nfs4_show_owner(s, oo);
	seq_printf(s, " }\n");
	nfsd_file_put(file);

	return 0;
}
static int nfs4_show_lock(struct seq_file *s, struct nfs4_stid *st)
{
	struct nfs4_ol_stateid *ols;
	struct nfs4_file *nf;
	struct nfsd_file *file;
	struct nfs4_stateowner *oo;

	ols = openlockstateid(st);
	oo = ols->st_stateowner;
	nf = st->sc_file;
	file = find_any_file(nf);
	if (!file)
		return 0;

	seq_printf(s, "- ");
	nfs4_show_stateid(s, &st->sc_stateid);
	seq_printf(s, ": { type: lock, ");

	/*
	 * Note: a lock stateid isn't really the same thing as a lock,
	 * it's the locking state held by one owner on a file, and there
	 * may be multiple (or no) lock ranges associated with it.
	 * (The same is true of open stateids.)
	 */

	nfs4_show_superblock(s, file);
	/* XXX: open stateid? */
	seq_printf(s, ", ");
	nfs4_show_fname(s, file);
	seq_printf(s, ", ");
	nfs4_show_owner(s, oo);
	seq_printf(s, " }\n");
	nfsd_file_put(file);

	return 0;
}
static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st)
{
	struct nfs4_delegation *ds;
	struct nfs4_file *nf;
	struct nfsd_file *file;

	ds = delegstateid(st);
	nf = st->sc_file;
	file = find_deleg_file(nf);
	if (!file)
		return 0;

	seq_printf(s, "- ");
	nfs4_show_stateid(s, &st->sc_stateid);
	seq_printf(s, ": { type: deleg, ");

	/* Kinda dead code as long as we only support read delegs: */
	seq_printf(s, "access: %s, ",
		ds->dl_type == NFS4_OPEN_DELEGATE_READ ? "r" : "w");

	/* XXX: lease time, whether it's being recalled. */

	nfs4_show_superblock(s, file);
	seq_printf(s, ", ");
	nfs4_show_fname(s, file);
	seq_printf(s, " }\n");
	nfsd_file_put(file);

	return 0;
}
static int nfs4_show_layout(struct seq_file *s, struct nfs4_stid *st)
{
	struct nfs4_layout_stateid *ls;
	struct nfsd_file *file;

	ls = container_of(st, struct nfs4_layout_stateid, ls_stid);
	file = ls->ls_file;

	seq_printf(s, "- ");
	nfs4_show_stateid(s, &st->sc_stateid);
	seq_printf(s, ": { type: layout, ");

	/* XXX: What else would be useful? */

	nfs4_show_superblock(s, file);
	seq_printf(s, ", ");
	nfs4_show_fname(s, file);
	seq_printf(s, " }\n");

	return 0;
}

static int states_show(struct seq_file *s, void *v)
{
	struct nfs4_stid *st = v;

	switch (st->sc_type) {
	case NFS4_OPEN_STID:
		return nfs4_show_open(s, st);
	case NFS4_LOCK_STID:
		return nfs4_show_lock(s, st);
	case NFS4_DELEG_STID:
		return nfs4_show_deleg(s, st);
	case NFS4_LAYOUT_STID:
		return nfs4_show_layout(s, st);
	default:
		return 0; /* XXX: or SEQ_SKIP? */
	}
	/* XXX: copy stateids? */
}

static struct seq_operations states_seq_ops = {
	.start = states_start,
	.next = states_next,
	.stop = states_stop,
	.show = states_show,
};
static int client_states_open(struct inode *inode, struct file *file)
{
	struct seq_file *s;
	struct nfs4_client *clp;
	int ret;

	clp = get_nfsdfs_clp(inode);
	if (!clp)
		return -ENXIO;

	ret = seq_open(file, &states_seq_ops);
	if (ret)
		return ret;
	s = file->private_data;
	s->private = clp;
	return 0;
}

static int client_opens_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	struct nfs4_client *clp = m->private;

	/* XXX: alternatively, we could get/drop in seq start/stop */
	drop_client(clp);
	return seq_release(inode, file);
}

static const struct file_operations client_states_fops = {
	.open		= client_states_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= client_opens_release,
};
/*
 * Normally we refuse to destroy clients that are in use, but here the
 * administrator is telling us to just do it.  We also want to wait
 * so the caller has a guarantee that the client's locks are gone by
 * the time the write returns:
 */
static void force_expire_client(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
	bool already_expired;

	spin_lock(&clp->cl_lock);
	clp->cl_time = 0;
	spin_unlock(&clp->cl_lock);

	wait_event(expiry_wq, atomic_read(&clp->cl_rpc_users) == 0);
	spin_lock(&nn->client_lock);
	already_expired = list_empty(&clp->cl_lru);
	if (!already_expired)
		unhash_client_locked(clp);
	spin_unlock(&nn->client_lock);

	if (!already_expired)
		expire_client(clp);
	else
		wait_event(expiry_wq, clp->cl_nfsd_dentry == NULL);
}
static ssize_t client_ctl_write(struct file *file, const char __user *buf,
				size_t size, loff_t *pos)
{
	char *data;
	struct nfs4_client *clp;

	data = simple_transaction_get(file, buf, size);
	if (IS_ERR(data))
		return PTR_ERR(data);
	if (size != 7 || 0 != memcmp(data, "expire\n", 7))
		return -EINVAL;
	clp = get_nfsdfs_clp(file_inode(file));
	if (!clp)
		return -ENXIO;
	force_expire_client(clp);
	drop_client(clp);
	return 7;
}

static const struct file_operations client_ctl_fops = {
	.write		= client_ctl_write,
	.release	= simple_transaction_release,
};

static const struct tree_descr client_files[] = {
	[0] = {"info", &client_info_fops, S_IRUSR},
	[1] = {"states", &client_states_fops, S_IRUSR},
	[2] = {"ctl", &client_ctl_fops, S_IWUSR},
	[3] = {""},
};
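/*
 * These three files appear under each client's nfsdfs directory
 * (presumably /proc/fs/nfsd/clients/<id>/ on a typical install): "info"
 * and "states" are read-only views, while writing "expire\n" to "ctl"
 * forcibly destroys the client via force_expire_client() above.
 */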
static struct nfs4_client *create_client(struct xdr_netobj name,
		struct svc_rqst *rqstp, nfs4_verifier *verf)
{
	struct nfs4_client *clp;
	struct sockaddr *sa = svc_addr(rqstp);
	int ret;
	struct net *net = SVC_NET(rqstp);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	clp = alloc_client(name);
	if (clp == NULL)
		return NULL;

	ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
	if (ret) {
		free_client(clp);
		return NULL;
	}
	gen_clid(clp, nn);
	kref_init(&clp->cl_nfsdfs.cl_ref);
	nfsd4_init_cb(&clp->cl_cb_null, clp, NULL, NFSPROC4_CLNT_CB_NULL);
	clp->cl_time = ktime_get_boottime_seconds();
	clear_bit(0, &clp->cl_cb_slot_busy);
	copy_verf(clp, verf);
	memcpy(&clp->cl_addr, sa, sizeof(struct sockaddr_storage));
	clp->cl_cb_session = NULL;
	clp->net = net;
	clp->cl_nfsd_dentry = nfsd_client_mkdir(nn, &clp->cl_nfsdfs,
			clp->cl_clientid.cl_id - nn->clientid_base,
			client_files);
	if (!clp->cl_nfsd_dentry) {
		free_client(clp);
		return NULL;
	}
	return clp;
}
static void
add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	struct nfs4_client *clp;

	while (*new) {
		clp = rb_entry(*new, struct nfs4_client, cl_namenode);
		parent = *new;

		if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	rb_link_node(&new_clp->cl_namenode, parent, new);
	rb_insert_color(&new_clp->cl_namenode, root);
}

static struct nfs4_client *
find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root)
{
	int cmp;
	struct rb_node *node = root->rb_node;
	struct nfs4_client *clp;

	while (node) {
		clp = rb_entry(node, struct nfs4_client, cl_namenode);
		cmp = compare_blob(&clp->cl_name, name);
		if (cmp > 0)
			node = node->rb_left;
		else if (cmp < 0)
			node = node->rb_right;
		else
			return clp;
	}
	return NULL;
}
static void
add_to_unconfirmed(struct nfs4_client *clp)
{
	unsigned int idhashval;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
	add_clp_to_name_tree(clp, &nn->unconf_name_tree);
	idhashval = clientid_hashval(clp->cl_clientid.cl_id);
	list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]);
	renew_client_locked(clp);
}

static void
move_to_confirmed(struct nfs4_client *clp)
{
	unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id);
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	dprintk("NFSD: move_to_confirm nfs4_client %p\n", clp);
	list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]);
	rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
	add_clp_to_name_tree(clp, &nn->conf_name_tree);
	set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
	renew_client_locked(clp);
}
static struct nfs4_client *
find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions)
{
	struct nfs4_client *clp;
	unsigned int idhashval = clientid_hashval(clid->cl_id);

	list_for_each_entry(clp, &tbl[idhashval], cl_idhash) {
		if (same_clid(&clp->cl_clientid, clid)) {
			if ((bool)clp->cl_minorversion != sessions)
				return NULL;
			renew_client_locked(clp);
			return clp;
		}
	}
	return NULL;
}

static struct nfs4_client *
find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
{
	struct list_head *tbl = nn->conf_id_hashtbl;

	lockdep_assert_held(&nn->client_lock);
	return find_client_in_id_table(tbl, clid, sessions);
}

static struct nfs4_client *
find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
{
	struct list_head *tbl = nn->unconf_id_hashtbl;

	lockdep_assert_held(&nn->client_lock);
	return find_client_in_id_table(tbl, clid, sessions);
}
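/*
 * Lookups are split into confirmed and unconfirmed tables because a
 * clientid is only promoted by SETCLIENTID_CONFIRM / CREATE_SESSION.  The
 * "sessions" argument additionally rejects a match if the clientid was
 * established under the wrong kind of minor version (sessions-based 4.1+
 * vs. non-sessions 4.0).
 */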
static bool clp_used_exchangeid(struct nfs4_client *clp)
{
	return clp->cl_exchange_flags != 0;
}

static struct nfs4_client *
find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
{
	lockdep_assert_held(&nn->client_lock);
	return find_clp_in_name_tree(name, &nn->conf_name_tree);
}

static struct nfs4_client *
find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
{
	lockdep_assert_held(&nn->client_lock);
	return find_clp_in_name_tree(name, &nn->unconf_name_tree);
}
static void
gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp)
{
	struct nfs4_cb_conn *conn = &clp->cl_cb_conn;
	struct sockaddr	*sa = svc_addr(rqstp);
	u32 scopeid = rpc_get_scope_id(sa);
	unsigned short expected_family;

	/* Currently, we only support tcp and tcp6 for the callback channel */
	if (se->se_callback_netid_len == 3 &&
	    !memcmp(se->se_callback_netid_val, "tcp", 3))
		expected_family = AF_INET;
	else if (se->se_callback_netid_len == 4 &&
		 !memcmp(se->se_callback_netid_val, "tcp6", 4))
		expected_family = AF_INET6;
	else
		goto out_err;

	conn->cb_addrlen = rpc_uaddr2sockaddr(clp->net, se->se_callback_addr_val,
					    se->se_callback_addr_len,
					    (struct sockaddr *)&conn->cb_addr,
					    sizeof(conn->cb_addr));

	if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family)
		goto out_err;

	if (conn->cb_addr.ss_family == AF_INET6)
		((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid;

	conn->cb_prog = se->se_callback_prog;
	conn->cb_ident = se->se_callback_ident;
	memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen);
	trace_nfsd_cb_args(clp, conn);
	return;
out_err:
	conn->cb_addr.ss_family = AF_UNSPEC;
	conn->cb_addrlen = 0;
	trace_nfsd_cb_nodelegs(clp);
	return;
}
/*
 * Cache a reply. nfsd4_check_resp_size() has bounded the cache size.
 */
static void
nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
{
	struct xdr_buf *buf = resp->xdr.buf;
	struct nfsd4_slot *slot = resp->cstate.slot;
	unsigned int base;

	dprintk("--> %s slot %p\n", __func__, slot);

	slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
	slot->sl_opcnt = resp->opcnt;
	slot->sl_status = resp->cstate.status;
	free_svc_cred(&slot->sl_cred);
	copy_cred(&slot->sl_cred, &resp->rqstp->rq_cred);

	if (!nfsd4_cache_this(resp)) {
		slot->sl_flags &= ~NFSD4_SLOT_CACHED;
		return;
	}
	slot->sl_flags |= NFSD4_SLOT_CACHED;

	base = resp->cstate.data_offset;
	slot->sl_datalen = buf->len - base;
	if (read_bytes_from_xdr_buf(buf, base, slot->sl_data, slot->sl_datalen))
		WARN(1, "%s: sessions DRC could not cache compound\n",
		     __func__);
	return;
}
/*
 * Encode the replay sequence operation from the slot values.
 * If cachethis is FALSE encode the uncached rep error on the next
 * operation which sets resp->p and increments resp->opcnt for
 * nfs4svc_encode_compoundres.
 */
static __be32
nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
			  struct nfsd4_compoundres *resp)
{
	struct nfsd4_op *op;
	struct nfsd4_slot *slot = resp->cstate.slot;

	/* Encode the replayed sequence operation */
	op = &args->ops[resp->opcnt - 1];
	nfsd4_encode_operation(resp, op);

	if (slot->sl_flags & NFSD4_SLOT_CACHED)
		return op->status;
	if (args->opcnt == 1) {
		/*
		 * The original operation wasn't a solo sequence--we
		 * always cache those--so this retry must not match the
		 * cache.
		 */
		op->status = nfserr_seq_false_retry;
	} else {
		op = &args->ops[resp->opcnt++];
		op->status = nfserr_retry_uncached_rep;
		nfsd4_encode_operation(resp, op);
	}
	return op->status;
}
/*
 * The sequence operation is not cached because we can use the slot and
 * session values.
 */
static __be32
nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
			 struct nfsd4_sequence *seq)
{
	struct nfsd4_slot *slot = resp->cstate.slot;
	struct xdr_stream *xdr = &resp->xdr;
	__be32 *p;
	__be32 status;

	dprintk("--> %s slot %p\n", __func__, slot);

	status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp);
	if (status)
		return status;

	p = xdr_reserve_space(xdr, slot->sl_datalen);
	if (!p) {
		WARN_ON_ONCE(1);
		return nfserr_serverfault;
	}
	xdr_encode_opaque_fixed(p, slot->sl_data, slot->sl_datalen);
	xdr_commit_encode(xdr);

	resp->opcnt = slot->sl_opcnt;
	return slot->sl_status;
}
/*
 * Set the exchange_id flags returned by the server.
 */
static void
nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid)
{
#ifdef CONFIG_NFSD_PNFS
	new->cl_exchange_flags |= EXCHGID4_FLAG_USE_PNFS_MDS;
#else
	new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS;
#endif

	/* Referrals are supported, Migration is not. */
	new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER;

	/* set the wire flags to return to client. */
	clid->flags = new->cl_exchange_flags;
}

static bool client_has_openowners(struct nfs4_client *clp)
{
	struct nfs4_openowner *oo;

	list_for_each_entry(oo, &clp->cl_openowners, oo_perclient) {
		if (!list_empty(&oo->oo_owner.so_stateids))
			return true;
	}
	return false;
}

static bool client_has_state(struct nfs4_client *clp)
{
	return client_has_openowners(clp)
#ifdef CONFIG_NFSD_PNFS
		|| !list_empty(&clp->cl_lo_states)
#endif
		|| !list_empty(&clp->cl_delegations)
		|| !list_empty(&clp->cl_sessions)
		|| !list_empty(&clp->async_copies);
}
static __be32 copy_impl_id(struct nfs4_client *clp,
		struct nfsd4_exchange_id *exid)
{
	if (!exid->nii_domain.data)
		return 0;
	xdr_netobj_dup(&clp->cl_nii_domain, &exid->nii_domain, GFP_KERNEL);
	if (!clp->cl_nii_domain.data)
		return nfserr_jukebox;
	xdr_netobj_dup(&clp->cl_nii_name, &exid->nii_name, GFP_KERNEL);
	if (!clp->cl_nii_name.data)
		return nfserr_jukebox;
	clp->cl_nii_time = exid->nii_time;
	return 0;
}
__be32
nfsd4_exchange_id(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	struct nfsd4_exchange_id *exid = &u->exchange_id;
	struct nfs4_client *conf, *new;
	struct nfs4_client *unconf = NULL;
	__be32 status;
	char			addr_str[INET6_ADDRSTRLEN];
	nfs4_verifier		verf = exid->verifier;
	struct sockaddr		*sa = svc_addr(rqstp);
	bool	update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A;
	struct nfsd_net		*nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	rpc_ntop(sa, addr_str, sizeof(addr_str));
	dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p "
		"ip_addr=%s flags %x, spa_how %u\n",
		__func__, rqstp, exid, exid->clname.len, exid->clname.data,
		addr_str, exid->flags, exid->spa_how);

	if (exid->flags & ~EXCHGID4_FLAG_MASK_A)
		return nfserr_inval;

	new = create_client(exid->clname, rqstp, &verf);
	if (new == NULL)
		return nfserr_jukebox;
	status = copy_impl_id(new, exid);
	if (status)
		goto out_nolock;

	switch (exid->spa_how) {
	case SP4_MACH_CRED:
		exid->spo_must_enforce[0] = 0;
		exid->spo_must_enforce[1] = (
			1 << (OP_BIND_CONN_TO_SESSION - 32) |
			1 << (OP_EXCHANGE_ID - 32) |
			1 << (OP_CREATE_SESSION - 32) |
			1 << (OP_DESTROY_SESSION - 32) |
			1 << (OP_DESTROY_CLIENTID - 32));

		exid->spo_must_allow[0] &= (1 << (OP_CLOSE) |
					1 << (OP_OPEN_DOWNGRADE) |
					1 << (OP_LOCKU) |
					1 << (OP_DELEGRETURN));

		exid->spo_must_allow[1] &= (
					1 << (OP_TEST_STATEID - 32) |
					1 << (OP_FREE_STATEID - 32));
		if (!svc_rqst_integrity_protected(rqstp)) {
			status = nfserr_inval;
			goto out_nolock;
		}
		/*
		 * Sometimes userspace doesn't give us a principal.
		 * Which is a bug, really.  Anyway, we can't enforce
		 * MACH_CRED in that case, better to give up now:
		 */
		if (!new->cl_cred.cr_principal &&
					!new->cl_cred.cr_raw_principal) {
			status = nfserr_serverfault;
			goto out_nolock;
		}
		new->cl_mach_cred = true;
	case SP4_NONE:
		break;
	default:			/* checked by xdr code */
		WARN_ON_ONCE(1);
		fallthrough;
	case SP4_SSV:
		status = nfserr_encr_alg_unsupp;
		goto out_nolock;
	}

	/* Cases below refer to rfc 5661 section 18.35.4: */
	spin_lock(&nn->client_lock);
	conf = find_confirmed_client_by_name(&exid->clname, nn);
	if (conf) {
		bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred);
		bool verfs_match = same_verf(&verf, &conf->cl_verifier);

		if (update) {
			if (!clp_used_exchangeid(conf)) { /* buggy client */
				status = nfserr_inval;
				goto out;
			}
			if (!nfsd4_mach_creds_match(conf, rqstp)) {
				status = nfserr_wrong_cred;
				goto out;
			}
			if (!creds_match) { /* case 9 */
				status = nfserr_perm;
				goto out;
			}
			if (!verfs_match) { /* case 8 */
				status = nfserr_not_same;
				goto out;
			}
			/* case 6 */
			exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;
			goto out_copy;
		}
		if (!creds_match) { /* case 3 */
			if (client_has_state(conf)) {
				status = nfserr_clid_inuse;
				goto out;
			}
			goto out_new;
		}
		if (verfs_match) { /* case 2 */
			conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
			goto out_copy;
		}
		/* case 5, client reboot */
		conf = NULL;
		goto out_new;
	}

	if (update) { /* case 7 */
		status = nfserr_noent;
		goto out;
	}

	unconf = find_unconfirmed_client_by_name(&exid->clname, nn);
	if (unconf) /* case 4, possible retry or client restart */
		unhash_client_locked(unconf);

	/* case 1 (normal case) */
out_new:
	if (conf) {
		status = mark_client_expired_locked(conf);
		if (status)
			goto out;
	}
	new->cl_minorversion = cstate->minorversion;
	new->cl_spo_must_allow.u.words[0] = exid->spo_must_allow[0];
	new->cl_spo_must_allow.u.words[1] = exid->spo_must_allow[1];

	add_to_unconfirmed(new);
	swap(new, conf);
out_copy:
	exid->clientid.cl_boot = conf->cl_clientid.cl_boot;
	exid->clientid.cl_id = conf->cl_clientid.cl_id;

	exid->seqid = conf->cl_cs_slot.sl_seqid + 1;
	nfsd4_set_ex_flags(conf, exid);

	dprintk("nfsd4_exchange_id seqid %d flags %x\n",
		conf->cl_cs_slot.sl_seqid, conf->cl_exchange_flags);
	status = nfs_ok;

out:
	spin_unlock(&nn->client_lock);
out_nolock:
	if (new)
		expire_client(new);
	if (unconf)
		expire_client(unconf);
	return status;
}
static __be32
check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
{
	dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,
		slot_seqid);

	/* The slot is in use, and no response has been sent. */
	if (slot_inuse) {
		if (seqid == slot_seqid)
			return nfserr_jukebox;
		else
			return nfserr_seq_misordered;
	}
	/* Note unsigned 32-bit arithmetic handles wraparound: */
	if (likely(seqid == slot_seqid + 1))
		return nfs_ok;
	if (seqid == slot_seqid)
		return nfserr_replay_cache;
	return nfserr_seq_misordered;
}
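/*
 * In summary, for a slot whose last-seen sequence number is N:
 *
 *	request seqid	slot busy?	result
 *	N		yes		nfserr_jukebox (original still running)
 *	N		no		nfserr_replay_cache (resend cached reply)
 *	N + 1		no		nfs_ok (the expected next request)
 *	anything else	either		nfserr_seq_misordered
 */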
/*
 * Cache the create session result into the create session single DRC
 * slot cache by saving the xdr structure. sl_seqid has been set.
 * Do this for solo or embedded create session operations.
 */
static void
nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
			   struct nfsd4_clid_slot *slot, __be32 nfserr)
{
	slot->sl_status = nfserr;
	memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses));
}

static __be32
nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
			    struct nfsd4_clid_slot *slot)
{
	memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses));
	return slot->sl_status;
}

#define NFSD_MIN_REQ_HDR_SEQ_SZ	((\
			2 * 2 + /* credential,verifier: AUTH_NULL, length 0 */ \
			1 +	/* MIN tag is length with zero, only length */ \
			3 +	/* version, opcount, opcode */ \
			XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
				/* seqid, slotID, slotID, cache */ \
			4 ) * sizeof(__be32))

#define NFSD_MIN_RESP_HDR_SEQ_SZ ((\
			2 +	/* verifier: AUTH_NULL, length 0 */ \
			1 +	/* status */ \
			1 +	/* MIN tag is length with zero, only length */ \
			3 +	/* opcount, opcode, opstatus*/ \
			XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
				/* seqid, slotID, slotID, slotID, status */ \
			5 ) * sizeof(__be32))
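/*
 * For example, with NFS4_MAX_SESSIONID_LEN == 16 (so XDR_QUADLEN == 4),
 * the response minimum works out to (2 + 1 + 1 + 3 + 4 + 5) * 4 = 64
 * bytes: a client offering a smaller maxresp_sz could never fit even a
 * reply holding a lone SEQUENCE op, so check_forechannel_attrs() below
 * rejects it with nfserr_toosmall.
 */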
static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
{
	u32 maxrpc = nn->nfsd_serv->sv_max_mesg;

	if (ca->maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ)
		return nfserr_toosmall;
	if (ca->maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ)
		return nfserr_toosmall;
	ca->headerpadsz = 0;
	ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc);
	ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc);
	ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND);
	ca->maxresp_cached = min_t(u32, ca->maxresp_cached,
			NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ);
	ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION);
	/*
	 * Note decreasing slot size below client's request may make it
	 * difficult for client to function correctly, whereas
	 * decreasing the number of slots will (just?) affect
	 * performance.  When short on memory we therefore prefer to
	 * decrease number of slots instead of their size.  Clients that
	 * request larger slots than they need will get poor results:
	 * Note that we always allow at least one slot, because our
	 * accounting is soft and provides no guarantees either way.
	 */
	ca->maxreqs = nfsd4_get_drc_mem(ca, nn);

	return nfs_ok;
}
/*
 * Server's NFSv4.1 backchannel support is AUTH_SYS-only for now.
 * These are based on similar macros in linux/sunrpc/msg_prot.h .
 */
#define RPC_MAX_HEADER_WITH_AUTH_SYS \
	(RPC_CALLHDRSIZE + 2 * (2 + UNX_CALLSLACK))

#define RPC_MAX_REPHEADER_WITH_AUTH_SYS \
	(RPC_REPHDRSIZE + (2 + NUL_REPLYSLACK))

#define NFSD_CB_MAX_REQ_SZ	((NFS4_enc_cb_recall_sz + \
				 RPC_MAX_HEADER_WITH_AUTH_SYS) * sizeof(__be32))
#define NFSD_CB_MAX_RESP_SZ	((NFS4_dec_cb_recall_sz + \
				 RPC_MAX_REPHEADER_WITH_AUTH_SYS) * \
				 sizeof(__be32))

static __be32 check_backchannel_attrs(struct nfsd4_channel_attrs *ca)
{
	ca->headerpadsz = 0;

	if (ca->maxreq_sz < NFSD_CB_MAX_REQ_SZ)
		return nfserr_toosmall;
	if (ca->maxresp_sz < NFSD_CB_MAX_RESP_SZ)
		return nfserr_toosmall;
	ca->maxresp_cached = 0;
	if (ca->maxops < 2)
		return nfserr_toosmall;

	return nfs_ok;
}

static __be32 nfsd4_check_cb_sec(struct nfsd4_cb_sec *cbs)
{
	switch (cbs->flavor) {
	case RPC_AUTH_NULL:
	case RPC_AUTH_UNIX:
		return nfs_ok;
	default:
		/*
		 * GSS case: the spec doesn't allow us to return this
		 * error.  But it also doesn't allow us not to support
		 * GSS.
		 * I'd rather this fail hard than return some error the
		 * client might think it can already handle:
		 */
		return nfserr_encr_alg_unsupp;
	}
}
__be32
nfsd4_create_session(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
{
	struct nfsd4_create_session *cr_ses = &u->create_session;
	struct sockaddr *sa = svc_addr(rqstp);
	struct nfs4_client *conf, *unconf;
	struct nfs4_client *old = NULL;
	struct nfsd4_session *new;
	struct nfsd4_conn *conn;
	struct nfsd4_clid_slot *cs_slot = NULL;
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	if (cr_ses->flags & ~SESSION4_FLAG_MASK_A)
		return nfserr_inval;
	status = nfsd4_check_cb_sec(&cr_ses->cb_sec);
	if (status)
		return status;
	status = check_forechannel_attrs(&cr_ses->fore_channel, nn);
	if (status)
		return status;
	status = check_backchannel_attrs(&cr_ses->back_channel);
	if (status)
		goto out_release_drc_mem;
	status = nfserr_jukebox;
	new = alloc_session(&cr_ses->fore_channel, &cr_ses->back_channel);
	if (!new)
		goto out_release_drc_mem;
	conn = alloc_conn_from_crses(rqstp, cr_ses);
	if (!conn)
		goto out_free_session;

	spin_lock(&nn->client_lock);
	unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn);
	conf = find_confirmed_client(&cr_ses->clientid, true, nn);
	WARN_ON_ONCE(conf && unconf);

	if (conf) {
		status = nfserr_wrong_cred;
		if (!nfsd4_mach_creds_match(conf, rqstp))
			goto out_free_conn;
		cs_slot = &conf->cl_cs_slot;
		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
		if (status) {
			if (status == nfserr_replay_cache)
				status = nfsd4_replay_create_session(cr_ses, cs_slot);
			goto out_free_conn;
		}
	} else if (unconf) {
		if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
		    !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) {
			status = nfserr_clid_inuse;
			goto out_free_conn;
		}
		status = nfserr_wrong_cred;
		if (!nfsd4_mach_creds_match(unconf, rqstp))
			goto out_free_conn;
		cs_slot = &unconf->cl_cs_slot;
		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
		if (status) {
			/* an unconfirmed replay returns misordered */
			status = nfserr_seq_misordered;
			goto out_free_conn;
		}
		old = find_confirmed_client_by_name(&unconf->cl_name, nn);
		if (old) {
			status = mark_client_expired_locked(old);
			if (status) {
				old = NULL;
				goto out_free_conn;
			}
		}
		move_to_confirmed(unconf);
		conf = unconf;
	} else {
		status = nfserr_stale_clientid;
		goto out_free_conn;
	}
	status = nfs_ok;
	/* Persistent sessions are not supported */
	cr_ses->flags &= ~SESSION4_PERSIST;
	/* Upshifting from TCP to RDMA is not supported */
	cr_ses->flags &= ~SESSION4_RDMA;

	init_session(rqstp, new, conf, cr_ses);
	nfsd4_get_session_locked(new);

	memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
	       NFS4_MAX_SESSIONID_LEN);
	cs_slot->sl_seqid++;
	cr_ses->seqid = cs_slot->sl_seqid;

	/* cache solo and embedded create sessions under the client_lock */
	nfsd4_cache_create_session(cr_ses, cs_slot, status);
	spin_unlock(&nn->client_lock);
	/* init connection and backchannel */
	nfsd4_init_conn(rqstp, conn, new);
	nfsd4_put_session(new);
	if (old)
		expire_client(old);
	return status;
out_free_conn:
	spin_unlock(&nn->client_lock);
	free_conn(conn);
	if (old)
		expire_client(old);
out_free_session:
	__free_session(new);
out_release_drc_mem:
	nfsd4_put_drc_mem(&cr_ses->fore_channel);
	return status;
}
static __be32 nfsd4_map_bcts_dir(u32 *dir)
{
	switch (*dir) {
	case NFS4_CDFC4_FORE:
	case NFS4_CDFC4_BACK:
		return nfs_ok;
	case NFS4_CDFC4_FORE_OR_BOTH:
	case NFS4_CDFC4_BACK_OR_BOTH:
		*dir = NFS4_CDFC4_BOTH;
		return nfs_ok;
	}
	return nfserr_inval;
}
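/*
 * i.e. a client asking for "FORE or BOTH" / "BACK or BOTH" is always
 * granted BOTH, since this server appears happy to use a TCP connection
 * in either direction; only an out-of-range direction value is rejected.
 */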
__be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	struct nfsd4_backchannel_ctl *bc = &u->backchannel_ctl;
	struct nfsd4_session *session = cstate->session;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
	__be32 status;

	status = nfsd4_check_cb_sec(&bc->bc_cb_sec);
	if (status)
		return status;
	spin_lock(&nn->client_lock);
	session->se_cb_prog = bc->bc_cb_program;
	session->se_cb_sec = bc->bc_cb_sec;
	spin_unlock(&nn->client_lock);

	nfsd4_probe_callback(session->se_client);

	return nfs_ok;
}
static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
{
	struct nfsd4_conn *c;

	list_for_each_entry(c, &s->se_conns, cn_persession) {
		if (c->cn_xprt == xpt) {
			return c;
		}
	}
	return NULL;
}

static __be32 nfsd4_match_existing_connection(struct svc_rqst *rqst,
		struct nfsd4_session *session, u32 req)
{
	struct nfs4_client *clp = session->se_client;
	struct svc_xprt *xpt = rqst->rq_xprt;
	struct nfsd4_conn *c;
	__be32 status;

	/* Following the last paragraph of RFC 5661 Section 18.34.3: */
	spin_lock(&clp->cl_lock);
	c = __nfsd4_find_conn(xpt, session);
	if (!c)
		status = nfserr_noent;
	else if (req == c->cn_flags)
		status = nfs_ok;
	else if (req == NFS4_CDFC4_FORE_OR_BOTH &&
				c->cn_flags != NFS4_CDFC4_BACK)
		status = nfs_ok;
	else if (req == NFS4_CDFC4_BACK_OR_BOTH &&
				c->cn_flags != NFS4_CDFC4_FORE)
		status = nfs_ok;
	else
		status = nfserr_inval;
	spin_unlock(&clp->cl_lock);
	return status;
}
__be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	struct nfsd4_bind_conn_to_session *bcts = &u->bind_conn_to_session;
	__be32 status;
	struct nfsd4_conn *conn;
	struct nfsd4_session *session;
	struct net *net = SVC_NET(rqstp);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	if (!nfsd4_last_compound_op(rqstp))
		return nfserr_not_only_op;
	spin_lock(&nn->client_lock);
	session = find_in_sessionid_hashtbl(&bcts->sessionid, net, &status);
	spin_unlock(&nn->client_lock);
	if (!session)
		goto out_no_session;
	status = nfserr_wrong_cred;
	if (!nfsd4_mach_creds_match(session->se_client, rqstp))
		goto out;
	status = nfsd4_match_existing_connection(rqstp, session, bcts->dir);
	if (status == nfs_ok || status == nfserr_inval)
		goto out;
	status = nfsd4_map_bcts_dir(&bcts->dir);
	if (status)
		goto out;
	conn = alloc_conn(rqstp, bcts->dir);
	status = nfserr_jukebox;
	if (!conn)
		goto out;
	nfsd4_init_conn(rqstp, conn, session);
	status = nfs_ok;
out:
	nfsd4_put_session(session);
out_no_session:
	return status;
}
static bool nfsd4_compound_in_session(struct nfsd4_compound_state *cstate, struct nfs4_sessionid *sid)
{
	if (!cstate->session)
		return false;
	return !memcmp(sid, &cstate->session->se_sessionid, sizeof(*sid));
}

__be32
nfsd4_destroy_session(struct svc_rqst *r, struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	struct nfs4_sessionid *sessionid = &u->destroy_session.sessionid;
	struct nfsd4_session *ses;
	__be32 status;
	int ref_held_by_me = 0;
	struct net *net = SVC_NET(r);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	status = nfserr_not_only_op;
	if (nfsd4_compound_in_session(cstate, sessionid)) {
		if (!nfsd4_last_compound_op(r))
			goto out;
		ref_held_by_me++;
	}
	dump_sessionid(__func__, sessionid);
	spin_lock(&nn->client_lock);
	ses = find_in_sessionid_hashtbl(sessionid, net, &status);
	if (!ses)
		goto out_client_lock;
	status = nfserr_wrong_cred;
	if (!nfsd4_mach_creds_match(ses->se_client, r))
		goto out_put_session;
	status = mark_session_dead_locked(ses, 1 + ref_held_by_me);
	if (status)
		goto out_put_session;
	unhash_session(ses);
	spin_unlock(&nn->client_lock);

	nfsd4_probe_callback_sync(ses->se_client);

	spin_lock(&nn->client_lock);
	status = nfs_ok;
out_put_session:
	nfsd4_put_session_locked(ses);
out_client_lock:
	spin_unlock(&nn->client_lock);
out:
	return status;
}
static __be32 nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd4_conn *c;
	__be32 status = nfs_ok;
	int ret;

	spin_lock(&clp->cl_lock);
	c = __nfsd4_find_conn(new->cn_xprt, ses);
	if (c)
		goto out_free;
	status = nfserr_conn_not_bound_to_session;
	if (clp->cl_mach_cred)
		goto out_free;
	__nfsd4_hash_conn(new, ses);
	spin_unlock(&clp->cl_lock);
	ret = nfsd4_register_conn(new);
	if (ret)
		/* oops; xprt is already down: */
		nfsd4_conn_lost(&new->cn_xpt_user);
	return nfs_ok;
out_free:
	spin_unlock(&clp->cl_lock);
	free_conn(new);
	return status;
}

static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session)
{
	struct nfsd4_compoundargs *args = rqstp->rq_argp;

	return args->opcnt > session->se_fchannel.maxops;
}

static bool nfsd4_request_too_big(struct svc_rqst *rqstp,
				  struct nfsd4_session *session)
{
	struct xdr_buf *xb = &rqstp->rq_arg;

	return xb->len > session->se_fchannel.maxreq_sz;
}
static bool replay_matches_cache(struct svc_rqst *rqstp,
		 struct nfsd4_sequence *seq, struct nfsd4_slot *slot)
{
	struct nfsd4_compoundargs *argp = rqstp->rq_argp;

	if ((bool)(slot->sl_flags & NFSD4_SLOT_CACHETHIS) !=
	    (bool)seq->cachethis)
		return false;
	/*
	 * If there's an error then the reply can have fewer ops than
	 * the call.
	 */
	if (slot->sl_opcnt < argp->opcnt && !slot->sl_status)
		return false;
	/*
	 * But if we cached a reply with *more* ops than the call you're
	 * sending us now, then this new call is clearly not really a
	 * replay of the old one:
	 */
	if (slot->sl_opcnt > argp->opcnt)
		return false;
	/* This is the only check explicitly called by spec: */
	if (!same_creds(&rqstp->rq_cred, &slot->sl_cred))
		return false;
	/*
	 * There may be more comparisons we could actually do, but the
	 * spec doesn't require us to catch every case where the calls
	 * don't match (that would require caching the call as well as
	 * the reply), so we don't bother.
	 */
	return true;
}
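/*
 * When this returns false, nfsd4_sequence() below answers with
 * nfserr_seq_false_retry instead of replaying the cached entry: the client
 * reused a (slot, seqid) pair for a different compound, and handing back
 * the old reply would silently answer the wrong question.
 */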
__be32
nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	struct nfsd4_sequence *seq = &u->sequence;
	struct nfsd4_compoundres *resp = rqstp->rq_resp;
	struct xdr_stream *xdr = &resp->xdr;
	struct nfsd4_session *session;
	struct nfs4_client *clp;
	struct nfsd4_slot *slot;
	struct nfsd4_conn *conn;
	__be32 status;
	int buflen;
	struct net *net = SVC_NET(rqstp);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	if (resp->opcnt != 1)
		return nfserr_sequence_pos;

	/*
	 * Will be either used or freed by nfsd4_sequence_check_conn
	 * below.
	 */
	conn = alloc_conn(rqstp, NFS4_CDFC4_FORE);
	if (!conn)
		return nfserr_jukebox;

	spin_lock(&nn->client_lock);
	session = find_in_sessionid_hashtbl(&seq->sessionid, net, &status);
	if (!session)
		goto out_no_session;
	clp = session->se_client;

	status = nfserr_too_many_ops;
	if (nfsd4_session_too_many_ops(rqstp, session))
		goto out_put_session;

	status = nfserr_req_too_big;
	if (nfsd4_request_too_big(rqstp, session))
		goto out_put_session;

	status = nfserr_badslot;
	if (seq->slotid >= session->se_fchannel.maxreqs)
		goto out_put_session;

	slot = session->se_slots[seq->slotid];
	dprintk("%s: slotid %d\n", __func__, seq->slotid);

	/* We do not negotiate the number of slots yet, so set the
	 * maxslots to the session maxreqs which is used to encode
	 * sr_highest_slotid and the sr_target_slot id to maxslots */
	seq->maxslots = session->se_fchannel.maxreqs;

	status = check_slot_seqid(seq->seqid, slot->sl_seqid,
					slot->sl_flags & NFSD4_SLOT_INUSE);
	if (status == nfserr_replay_cache) {
		status = nfserr_seq_misordered;
		if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED))
			goto out_put_session;
		status = nfserr_seq_false_retry;
		if (!replay_matches_cache(rqstp, seq, slot))
			goto out_put_session;
		cstate->slot = slot;
		cstate->session = session;
		cstate->clp = clp;
		/* Return the cached reply status and set cstate->status
		 * for nfsd4_proc_compound processing */
		status = nfsd4_replay_cache_entry(resp, seq);
		cstate->status = nfserr_replay_cache;
		goto out;
	}
	if (status)
		goto out_put_session;

	status = nfsd4_sequence_check_conn(conn, session);
	conn = NULL;
	if (status)
		goto out_put_session;

	buflen = (seq->cachethis) ?
			session->se_fchannel.maxresp_cached :
			session->se_fchannel.maxresp_sz;
	status = (seq->cachethis) ? nfserr_rep_too_big_to_cache :
				    nfserr_rep_too_big;
	if (xdr_restrict_buflen(xdr, buflen - rqstp->rq_auth_slack))
		goto out_put_session;
	svc_reserve(rqstp, buflen);

	status = nfs_ok;
	/* Success! bump slot seqid */
	slot->sl_seqid = seq->seqid;
	slot->sl_flags |= NFSD4_SLOT_INUSE;
	if (seq->cachethis)
		slot->sl_flags |= NFSD4_SLOT_CACHETHIS;
	else
		slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS;

	cstate->slot = slot;
	cstate->session = session;
	cstate->clp = clp;

out:
	switch (clp->cl_cb_state) {
	case NFSD4_CB_DOWN:
		seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN;
		break;
	case NFSD4_CB_FAULT:
		seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT;
		break;
	default:
		seq->status_flags = 0;
	}
	if (!list_empty(&clp->cl_revoked))
		seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED;
out_no_session:
	if (conn)
		free_conn(conn);
	spin_unlock(&nn->client_lock);
	return status;
out_put_session:
	nfsd4_put_session_locked(session);
	goto out_no_session;
}
void
nfsd4_sequence_done(struct nfsd4_compoundres *resp)
{
	struct nfsd4_compound_state *cs = &resp->cstate;

	if (nfsd4_has_session(cs)) {
		if (cs->status != nfserr_replay_cache) {
			nfsd4_store_cache_entry(resp);
			cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE;
		}
		/* Drop session reference that was taken in nfsd4_sequence() */
		nfsd4_put_session(cs->session);
	} else if (cs->clp)
		put_client_renew(cs->clp);
}
__be32
nfsd4_destroy_clientid(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	struct nfsd4_destroy_clientid *dc = &u->destroy_clientid;
	struct nfs4_client *conf, *unconf;
	struct nfs4_client *clp = NULL;
	__be32 status = 0;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	spin_lock(&nn->client_lock);
	unconf = find_unconfirmed_client(&dc->clientid, true, nn);
	conf = find_confirmed_client(&dc->clientid, true, nn);
	WARN_ON_ONCE(conf && unconf);

	if (conf) {
		if (client_has_state(conf)) {
			status = nfserr_clientid_busy;
			goto out;
		}
		status = mark_client_expired_locked(conf);
		if (status)
			goto out;
		clp = conf;
	} else if (unconf)
		clp = unconf;
	else {
		status = nfserr_stale_clientid;
		goto out;
	}
	if (!nfsd4_mach_creds_match(clp, rqstp)) {
		clp = NULL;
		status = nfserr_wrong_cred;
		goto out;
	}
	unhash_client_locked(clp);
out:
	spin_unlock(&nn->client_lock);
	if (clp)
		expire_client(clp);
	return status;
}
__be32
nfsd4_reclaim_complete(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
{
	struct nfsd4_reclaim_complete *rc = &u->reclaim_complete;
	__be32 status = 0;

	if (rc->rca_one_fs) {
		if (!cstate->current_fh.fh_dentry)
			return nfserr_nofilehandle;
		/*
		 * We don't take advantage of the rca_one_fs case.
		 * That's OK, it's optional, we can safely ignore it.
		 */
		return nfs_ok;
	}

	status = nfserr_complete_already;
	if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE,
			&cstate->session->se_client->cl_flags))
		goto out;

	status = nfserr_stale_clientid;
	if (is_client_expired(cstate->session->se_client))
		/*
		 * The following error isn't really legal.
		 * But we only get here if the client just explicitly
		 * destroyed the client.  Surely it no longer cares what
		 * error it gets back on an operation for the dead
		 * client.
		 */
		goto out;

	status = nfs_ok;
	nfsd4_client_record_create(cstate->session->se_client);
	inc_reclaim_complete(cstate->session->se_client);
out:
	return status;
}
__be32
nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		  union nfsd4_op_u *u)
{
	struct nfsd4_setclientid *setclid = &u->setclientid;
	struct xdr_netobj	clname = setclid->se_name;
	nfs4_verifier		clverifier = setclid->se_verf;
	struct nfs4_client	*conf, *new;
	struct nfs4_client	*unconf = NULL;
	__be32			status;
	struct nfsd_net		*nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	new = create_client(clname, rqstp, &clverifier);
	if (new == NULL)
		return nfserr_jukebox;
	/* Cases below refer to rfc 3530 section 14.2.33: */
	spin_lock(&nn->client_lock);
	conf = find_confirmed_client_by_name(&clname, nn);
	if (conf && client_has_state(conf)) {
		/* case 0: */
		status = nfserr_clid_inuse;
		if (clp_used_exchangeid(conf))
			goto out;
		if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
			trace_nfsd_clid_inuse_err(conf);
			goto out;
		}
	}
	unconf = find_unconfirmed_client_by_name(&clname, nn);
	if (unconf)
		unhash_client_locked(unconf);
	/* We need to handle only case 1: probable callback update */
	if (conf && same_verf(&conf->cl_verifier, &clverifier)) {
		copy_clid(new, conf);
		gen_confirm(new, nn);
	}
	new->cl_minorversion = 0;
	gen_callback(new, setclid, rqstp);
	add_to_unconfirmed(new);
	setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
	setclid->se_clientid.cl_id = new->cl_clientid.cl_id;
	memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data));
	new = NULL;
	status = nfs_ok;
out:
	spin_unlock(&nn->client_lock);
	if (new)
		free_client(new);
	if (unconf)
		expire_client(unconf);
	return status;
}
__be32
nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
			struct nfsd4_compound_state *cstate,
			union nfsd4_op_u *u)
{
	struct nfsd4_setclientid_confirm *setclientid_confirm =
			&u->setclientid_confirm;
	struct nfs4_client *conf, *unconf;
	struct nfs4_client *old = NULL;
	nfs4_verifier confirm = setclientid_confirm->sc_confirm;
	clientid_t *clid = &setclientid_confirm->sc_clientid;
	__be32 status;
	struct nfsd_net	*nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	if (STALE_CLIENTID(clid, nn))
		return nfserr_stale_clientid;

	spin_lock(&nn->client_lock);
	conf = find_confirmed_client(clid, false, nn);
	unconf = find_unconfirmed_client(clid, false, nn);
	/*
	 * We try hard to give out unique clientid's, so if we get an
	 * attempt to confirm the same clientid with a different cred,
	 * the client may be buggy; this should never happen.
	 *
	 * Nevertheless, RFC 7530 recommends INUSE for this case:
	 */
	status = nfserr_clid_inuse;
	if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred))
		goto out;
	if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred))
		goto out;
	/* cases below refer to rfc 3530 section 14.2.34: */
	if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) {
		if (conf && same_verf(&confirm, &conf->cl_confirm)) {
			/* case 2: probable retransmit */
			status = nfs_ok;
		} else /* case 4: client hasn't noticed we rebooted yet? */
			status = nfserr_stale_clientid;
		goto out;
	}
	status = nfs_ok;
	if (conf) { /* case 1: callback update */
		old = unconf;
		unhash_client_locked(old);
		nfsd4_change_callback(conf, &unconf->cl_cb_conn);
	} else { /* case 3: normal case; new or rebooted client */
		old = find_confirmed_client_by_name(&unconf->cl_name, nn);
		if (old) {
			status = nfserr_clid_inuse;
			if (client_has_state(old)
					&& !same_creds(&unconf->cl_cred,
							&old->cl_cred))
				goto out;
			status = mark_client_expired_locked(old);
			if (status) {
				old = NULL;
				goto out;
			}
		}
		move_to_confirmed(unconf);
		conf = unconf;
	}
	get_client_locked(conf);
	spin_unlock(&nn->client_lock);
	nfsd4_probe_callback(conf);
	spin_lock(&nn->client_lock);
	put_client_renew_locked(conf);
out:
	spin_unlock(&nn->client_lock);
	if (old)
		expire_client(old);
	return status;
}
static struct nfs4_file *nfsd4_alloc_file(void)
{
	return kmem_cache_alloc(file_slab, GFP_KERNEL);
}

/* OPEN Share state helper functions */
static void nfsd4_init_file(struct knfsd_fh *fh, unsigned int hashval,
				struct nfs4_file *fp)
{
	lockdep_assert_held(&state_lock);

	refcount_set(&fp->fi_ref, 1);
	spin_lock_init(&fp->fi_lock);
	INIT_LIST_HEAD(&fp->fi_stateids);
	INIT_LIST_HEAD(&fp->fi_delegations);
	INIT_LIST_HEAD(&fp->fi_clnt_odstate);
	fh_copy_shallow(&fp->fi_fhandle, fh);
	fp->fi_deleg_file = NULL;
	fp->fi_had_conflict = false;
	fp->fi_share_deny = 0;
	memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
	memset(fp->fi_access, 0, sizeof(fp->fi_access));
#ifdef CONFIG_NFSD_PNFS
	INIT_LIST_HEAD(&fp->fi_lo_states);
	atomic_set(&fp->fi_lo_recalls, 0);
#endif
	hlist_add_head_rcu(&fp->fi_hash, &file_hashtbl[hashval]);
}
void
nfsd4_free_slabs(void)
{
	kmem_cache_destroy(client_slab);
	kmem_cache_destroy(openowner_slab);
	kmem_cache_destroy(lockowner_slab);
	kmem_cache_destroy(file_slab);
	kmem_cache_destroy(stateid_slab);
	kmem_cache_destroy(deleg_slab);
	kmem_cache_destroy(odstate_slab);
}

int
nfsd4_init_slabs(void)
{
	client_slab = kmem_cache_create("nfsd4_clients",
			sizeof(struct nfs4_client), 0, 0, NULL);
	if (client_slab == NULL)
		goto out;
	openowner_slab = kmem_cache_create("nfsd4_openowners",
			sizeof(struct nfs4_openowner), 0, 0, NULL);
	if (openowner_slab == NULL)
		goto out_free_client_slab;
	lockowner_slab = kmem_cache_create("nfsd4_lockowners",
			sizeof(struct nfs4_lockowner), 0, 0, NULL);
	if (lockowner_slab == NULL)
		goto out_free_openowner_slab;
	file_slab = kmem_cache_create("nfsd4_files",
			sizeof(struct nfs4_file), 0, 0, NULL);
	if (file_slab == NULL)
		goto out_free_lockowner_slab;
	stateid_slab = kmem_cache_create("nfsd4_stateids",
			sizeof(struct nfs4_ol_stateid), 0, 0, NULL);
	if (stateid_slab == NULL)
		goto out_free_file_slab;
	deleg_slab = kmem_cache_create("nfsd4_delegations",
			sizeof(struct nfs4_delegation), 0, 0, NULL);
	if (deleg_slab == NULL)
		goto out_free_stateid_slab;
	odstate_slab = kmem_cache_create("nfsd4_odstate",
			sizeof(struct nfs4_clnt_odstate), 0, 0, NULL);
	if (odstate_slab == NULL)
		goto out_free_deleg_slab;
	return 0;

out_free_deleg_slab:
	kmem_cache_destroy(deleg_slab);
out_free_stateid_slab:
	kmem_cache_destroy(stateid_slab);
out_free_file_slab:
	kmem_cache_destroy(file_slab);
out_free_lockowner_slab:
	kmem_cache_destroy(lockowner_slab);
out_free_openowner_slab:
	kmem_cache_destroy(openowner_slab);
out_free_client_slab:
	kmem_cache_destroy(client_slab);
out:
	return -ENOMEM;
}
static void init_nfs4_replay(struct nfs4_replay *rp)
{
	rp->rp_status = nfserr_serverfault;
	rp->rp_buflen = 0;
	rp->rp_buf = rp->rp_ibuf;
	mutex_init(&rp->rp_mutex);
}

static void nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate,
		struct nfs4_stateowner *so)
{
	if (!nfsd4_has_session(cstate)) {
		mutex_lock(&so->so_replay.rp_mutex);
		cstate->replay_owner = nfs4_get_stateowner(so);
	}
}

void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate)
{
	struct nfs4_stateowner *so = cstate->replay_owner;

	if (so != NULL) {
		cstate->replay_owner = NULL;
		mutex_unlock(&so->so_replay.rp_mutex);
		nfs4_put_stateowner(so);
	}
}
static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp)
{
	struct nfs4_stateowner *sop;

	sop = kmem_cache_alloc(slab, GFP_KERNEL);
	if (!sop)
		return NULL;

	xdr_netobj_dup(&sop->so_owner, owner, GFP_KERNEL);
	if (!sop->so_owner.data) {
		kmem_cache_free(slab, sop);
		return NULL;
	}

	INIT_LIST_HEAD(&sop->so_stateids);
	sop->so_client = clp;
	init_nfs4_replay(&sop->so_replay);
	atomic_set(&sop->so_count, 1);
	return sop;
}

static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval)
{
	lockdep_assert_held(&clp->cl_lock);

	list_add(&oo->oo_owner.so_strhash,
		 &clp->cl_ownerstr_hashtbl[strhashval]);
	list_add(&oo->oo_perclient, &clp->cl_openowners);
}

static void nfs4_unhash_openowner(struct nfs4_stateowner *so)
{
	unhash_openowner_locked(openowner(so));
}

static void nfs4_free_openowner(struct nfs4_stateowner *so)
{
	struct nfs4_openowner *oo = openowner(so);

	kmem_cache_free(openowner_slab, oo);
}

static const struct nfs4_stateowner_operations openowner_ops = {
	.so_unhash =	nfs4_unhash_openowner,
	.so_free =	nfs4_free_openowner,
};
static struct nfs4_ol_stateid *
nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
{
	struct nfs4_ol_stateid *local, *ret = NULL;
	struct nfs4_openowner *oo = open->op_openowner;

	lockdep_assert_held(&fp->fi_lock);

	list_for_each_entry(local, &fp->fi_stateids, st_perfile) {
		/* ignore lock owners */
		if (local->st_stateowner->so_is_open_owner == 0)
			continue;
		if (local->st_stateowner != &oo->oo_owner)
			continue;
		if (local->st_stid.sc_type == NFS4_OPEN_STID) {
			ret = local;
			refcount_inc(&ret->st_stid.sc_count);
			break;
		}
	}
	return ret;
}

static __be32
nfsd4_verify_open_stid(struct nfs4_stid *s)
{
	__be32 ret = nfs_ok;

	switch (s->sc_type) {
	default:
		break;
	case 0:
	case NFS4_CLOSED_STID:
	case NFS4_CLOSED_DELEG_STID:
		ret = nfserr_bad_stateid;
		break;
	case NFS4_REVOKED_DELEG_STID:
		ret = nfserr_deleg_revoked;
	}
	return ret;
}
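/*
 * So a stateid that has been closed or revoked (or never fully set up,
 * sc_type 0) is reported back to the caller; nfsd4_lock_ol_stateid()
 * below uses this to detect a race with CLOSE after it has taken
 * st_mutex, and backs out by dropping the mutex again.
 */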
/* Lock the stateid st_mutex, and deal with races with CLOSE */
static __be32
nfsd4_lock_ol_stateid(struct nfs4_ol_stateid *stp)
{
	__be32 ret;

	mutex_lock_nested(&stp->st_mutex, LOCK_STATEID_MUTEX);
	ret = nfsd4_verify_open_stid(&stp->st_stid);
	if (ret != nfs_ok)
		mutex_unlock(&stp->st_mutex);
	return ret;
}

static struct nfs4_ol_stateid *
nfsd4_find_and_lock_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
{
	struct nfs4_ol_stateid *stp;

	do {
		spin_lock(&fp->fi_lock);
		stp = nfsd4_find_existing_open(fp, open);
		spin_unlock(&fp->fi_lock);
		if (!stp || nfsd4_lock_ol_stateid(stp) == nfs_ok)
			break;
		nfs4_put_stid(&stp->st_stid);
	} while (1);
	return stp;
}
static struct nfs4_openowner *
alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
			   struct nfsd4_compound_state *cstate)
{
	struct nfs4_client *clp = cstate->clp;
	struct nfs4_openowner *oo, *ret;

	oo = alloc_stateowner(openowner_slab, &open->op_owner, clp);
	if (!oo)
		return NULL;
	oo->oo_owner.so_ops = &openowner_ops;
	oo->oo_owner.so_is_open_owner = 1;
	oo->oo_owner.so_seqid = open->op_seqid;
	oo->oo_flags = 0;
	if (nfsd4_has_session(cstate))
		oo->oo_flags |= NFS4_OO_CONFIRMED;
	oo->oo_time = 0;
	oo->oo_last_closed_stid = NULL;
	INIT_LIST_HEAD(&oo->oo_close_lru);
	spin_lock(&clp->cl_lock);
	ret = find_openstateowner_str_locked(strhashval, open, clp);
	if (ret == NULL) {
		hash_openowner(oo, clp, strhashval);
		ret = oo;
	} else
		nfs4_free_stateowner(&oo->oo_owner);

	spin_unlock(&clp->cl_lock);
	return ret;
}
static struct nfs4_ol_stateid *
init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open)
{

	struct nfs4_openowner *oo = open->op_openowner;
	struct nfs4_ol_stateid *retstp = NULL;
	struct nfs4_ol_stateid *stp;

	stp = open->op_stp;
	/* We are moving these outside of the spinlocks to avoid the warnings */
	mutex_init(&stp->st_mutex);
	mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);

retry:
	spin_lock(&oo->oo_owner.so_client->cl_lock);
	spin_lock(&fp->fi_lock);

	retstp = nfsd4_find_existing_open(fp, open);
	if (retstp)
		goto out_unlock;

	open->op_stp = NULL;
	refcount_inc(&stp->st_stid.sc_count);
	stp->st_stid.sc_type = NFS4_OPEN_STID;
	INIT_LIST_HEAD(&stp->st_locks);
	stp->st_stateowner = nfs4_get_stateowner(&oo->oo_owner);
	get_nfs4_file(fp);
	stp->st_stid.sc_file = fp;
	stp->st_access_bmap = 0;
	stp->st_deny_bmap = 0;
	stp->st_openstp = NULL;
	list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
	list_add(&stp->st_perfile, &fp->fi_stateids);

out_unlock:
	spin_unlock(&fp->fi_lock);
	spin_unlock(&oo->oo_owner.so_client->cl_lock);
	if (retstp) {
		/* Handle races with CLOSE */
		if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
			nfs4_put_stid(&retstp->st_stid);
			goto retry;
		}
		/* To keep mutex tracking happy */
		mutex_unlock(&stp->st_mutex);
		stp = retstp;
	}
	return stp;
}
/*
 * In the 4.0 case we need to keep the owners around a little while to handle
 * CLOSE replay. We still do need to release any file access that is held by
 * them before returning however.
 */
static void
move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net)
{
	struct nfs4_ol_stateid *last;
	struct nfs4_openowner *oo = openowner(s->st_stateowner);
	struct nfsd_net *nn = net_generic(s->st_stid.sc_client->net,
						nfsd_net_id);

	dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo);

	/*
	 * We know that we hold one reference via nfsd4_close, and another
	 * "persistent" reference for the client. If the refcount is higher
	 * than 2, then there are still calls in progress that are using this
	 * stateid. We can't put the sc_file reference until they are finished.
	 * Wait for the refcount to drop to 2. Since it has been unhashed,
	 * there should be no danger of the refcount going back up again at
	 * this point.
	 */
	wait_event(close_wq, refcount_read(&s->st_stid.sc_count) == 2);

	release_all_access(s);
	if (s->st_stid.sc_file) {
		put_nfs4_file(s->st_stid.sc_file);
		s->st_stid.sc_file = NULL;
	}

	spin_lock(&nn->client_lock);
	last = oo->oo_last_closed_stid;
	oo->oo_last_closed_stid = s;
	list_move_tail(&oo->oo_close_lru, &nn->close_lru);
	oo->oo_time = ktime_get_boottime_seconds();
	spin_unlock(&nn->client_lock);
	if (last)
		nfs4_put_stid(&last->st_stid);
}
4416 /* search file_hashtbl[] for file */
4417 static struct nfs4_file
*
4418 find_file_locked(struct knfsd_fh
*fh
, unsigned int hashval
)
4420 struct nfs4_file
*fp
;
4422 hlist_for_each_entry_rcu(fp
, &file_hashtbl
[hashval
], fi_hash
,
4423 lockdep_is_held(&state_lock
)) {
4424 if (fh_match(&fp
->fi_fhandle
, fh
)) {
4425 if (refcount_inc_not_zero(&fp
->fi_ref
))
4433 find_file(struct knfsd_fh
*fh
)
4435 struct nfs4_file
*fp
;
4436 unsigned int hashval
= file_hashval(fh
);
4439 fp
= find_file_locked(fh
, hashval
);
4444 static struct nfs4_file
*
4445 find_or_add_file(struct nfs4_file
*new, struct knfsd_fh
*fh
)
4447 struct nfs4_file
*fp
;
4448 unsigned int hashval
= file_hashval(fh
);
4451 fp
= find_file_locked(fh
, hashval
);
4456 spin_lock(&state_lock
);
4457 fp
= find_file_locked(fh
, hashval
);
4458 if (likely(fp
== NULL
)) {
4459 nfsd4_init_file(fh
, hashval
, new);
4462 spin_unlock(&state_lock
);
4468 * Called to check deny when READ with all zero stateid or
4469 * WRITE with all zero or all one stateid
4472 nfs4_share_conflict(struct svc_fh
*current_fh
, unsigned int deny_type
)
4474 struct nfs4_file
*fp
;
4475 __be32 ret
= nfs_ok
;
4477 fp
= find_file(¤t_fh
->fh_handle
);
4480 /* Check for conflicting share reservations */
4481 spin_lock(&fp
->fi_lock
);
4482 if (fp
->fi_share_deny
& deny_type
)
4483 ret
= nfserr_locked
;
4484 spin_unlock(&fp
->fi_lock
);
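/*
 * Worked example of the check above: if one client holds an open with
 * NFS4_SHARE_DENY_READ, then fi_share_deny (the union of all deny bits
 * currently held on this nfs4_file) has that bit set, so a READ using
 * the all-zero stateid fails here with nfserr_locked.
 */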
static void nfsd4_cb_recall_prepare(struct nfsd4_callback *cb)
{
	struct nfs4_delegation *dp = cb_to_delegation(cb);
	struct nfsd_net *nn = net_generic(dp->dl_stid.sc_client->net,
					  nfsd_net_id);

	block_delegations(&dp->dl_stid.sc_file->fi_fhandle);

	/*
	 * We can't do this in nfsd_break_deleg_cb because it is
	 * already holding inode->i_lock.
	 *
	 * If the dl_time != 0, then we know that it has already been
	 * queued for a lease break. Don't queue it again.
	 */
	spin_lock(&state_lock);
	if (dp->dl_time == 0) {
		dp->dl_time = ktime_get_boottime_seconds();
		list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);
	}
	spin_unlock(&state_lock);
}

static int nfsd4_cb_recall_done(struct nfsd4_callback *cb,
		struct rpc_task *task)
{
	struct nfs4_delegation *dp = cb_to_delegation(cb);

	if (dp->dl_stid.sc_type == NFS4_CLOSED_DELEG_STID ||
	    dp->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID)
		return 1;

	switch (task->tk_status) {
	case 0:
		return 1;
	case -NFS4ERR_DELAY:
		rpc_delay(task, 2 * HZ);
		return 0;
	case -EBADHANDLE:
	case -NFS4ERR_BAD_STATEID:
		/*
		 * Race: client probably got cb_recall before open reply
		 * granting delegation.
		 */
		if (dp->dl_retries--) {
			rpc_delay(task, 2 * HZ);
			return 0;
		}
		fallthrough;
	default:
		return 1;
	}
}
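/*
 * Net effect of the switch above: NFS4ERR_DELAY backs the recall RPC off
 * by two seconds and retries indefinitely, while NFS4ERR_BAD_STATEID
 * (typically the recall racing ahead of the OPEN reply that granted the
 * delegation) is retried at the same interval only dl_retries more times
 * before the callback is treated as done.
 */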
static void nfsd4_cb_recall_release(struct nfsd4_callback *cb)
{
	struct nfs4_delegation *dp = cb_to_delegation(cb);

	nfs4_put_stid(&dp->dl_stid);
}

static const struct nfsd4_callback_ops nfsd4_cb_recall_ops = {
	.prepare	= nfsd4_cb_recall_prepare,
	.done		= nfsd4_cb_recall_done,
	.release	= nfsd4_cb_recall_release,
};

static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
{
	/*
	 * We're assuming the state code never drops its reference
	 * without first removing the lease. Since we're in this lease
	 * callback (and since the lease code is serialized by the
	 * i_lock) we know the server hasn't removed the lease yet, and
	 * we know it's safe to take a reference.
	 */
	refcount_inc(&dp->dl_stid.sc_count);
	nfsd4_run_cb(&dp->dl_recall);
}

/* Called from break_lease() with i_lock held. */
static bool
nfsd_break_deleg_cb(struct file_lock *fl)
{
	bool ret = false;
	struct nfs4_delegation *dp = (struct nfs4_delegation *)fl->fl_owner;
	struct nfs4_file *fp = dp->dl_stid.sc_file;

	trace_nfsd_deleg_break(&dp->dl_stid.sc_stateid);

	/*
	 * We don't want the locks code to timeout the lease for us;
	 * we'll remove it ourself if a delegation isn't returned
	 * in time:
	 */
	fl->fl_break_time = 0;

	spin_lock(&fp->fi_lock);
	fp->fi_had_conflict = true;
	nfsd_break_one_deleg(dp);
	spin_unlock(&fp->fi_lock);
	return ret;
}

static bool nfsd_breaker_owns_lease(struct file_lock *fl)
{
	struct nfs4_delegation *dl = fl->fl_owner;
	struct svc_rqst *rqst;
	struct nfs4_client *clp;

	if (!i_am_nfsd())
		return false;
	rqst = kthread_data(current);
	/* Note rq_prog == NFS_ACL_PROGRAM is also possible: */
	if (rqst->rq_prog != NFS_PROGRAM || rqst->rq_vers < 4)
		return false;
	clp = *(rqst->rq_lease_breaker);
	return dl->dl_stid.sc_client == clp;
}

static int
nfsd_change_deleg_cb(struct file_lock *onlist, int arg,
		     struct list_head *dispose)
{
	if (arg & F_UNLCK)
		return lease_modify(onlist, arg, dispose);
	else
		return -EAGAIN;
}

static const struct lock_manager_operations nfsd_lease_mng_ops = {
	.lm_breaker_owns_lease = nfsd_breaker_owns_lease,
	.lm_break = nfsd_break_deleg_cb,
	.lm_change = nfsd_change_deleg_cb,
};

static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid)
{
	if (nfsd4_has_session(cstate))
		return nfs_ok;
	if (seqid == so->so_seqid - 1)
		return nfserr_replay_me;
	if (seqid == so->so_seqid)
		return nfs_ok;
	return nfserr_bad_seqid;
}
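/*
 * Example of the v4.0 seqid rules above: if so_seqid is currently 8, an
 * operation carrying seqid 7 is a retransmission (nfserr_replay_me,
 * answered from the replay cache), seqid 8 is the expected next
 * operation, and anything else is nfserr_bad_seqid. Sessions (v4.1+)
 * make this per-owner bookkeeping unnecessary.
 */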
static __be32 lookup_clientid(clientid_t *clid,
		struct nfsd4_compound_state *cstate,
		struct nfsd_net *nn,
		bool sessions)
{
	struct nfs4_client *found;

	if (cstate->clp) {
		found = cstate->clp;
		if (!same_clid(&found->cl_clientid, clid))
			return nfserr_stale_clientid;
		return nfs_ok;
	}

	if (STALE_CLIENTID(clid, nn))
		return nfserr_stale_clientid;

	/*
	 * For v4.1+ we get the client in the SEQUENCE op. If we don't have one
	 * cached already then we know this is for v4.0 and "sessions" will be
	 * false.
	 */
	WARN_ON_ONCE(cstate->session);
	spin_lock(&nn->client_lock);
	found = find_confirmed_client(clid, sessions, nn);
	if (!found) {
		spin_unlock(&nn->client_lock);
		return nfserr_expired;
	}
	atomic_inc(&found->cl_rpc_users);
	spin_unlock(&nn->client_lock);

	/* Cache the nfs4_client in cstate! */
	cstate->clp = found;
	return nfs_ok;
}

__be32
nfsd4_process_open1(struct nfsd4_compound_state *cstate,
		    struct nfsd4_open *open, struct nfsd_net *nn)
{
	clientid_t *clientid = &open->op_clientid;
	struct nfs4_client *clp = NULL;
	unsigned int strhashval;
	struct nfs4_openowner *oo = NULL;
	__be32 status;

	if (STALE_CLIENTID(&open->op_clientid, nn))
		return nfserr_stale_clientid;
	/*
	 * In case we need it later, after we've already created the
	 * file and don't want to risk a further failure:
	 */
	open->op_file = nfsd4_alloc_file();
	if (open->op_file == NULL)
		return nfserr_jukebox;

	status = lookup_clientid(clientid, cstate, nn, false);
	if (status)
		return status;
	clp = cstate->clp;

	strhashval = ownerstr_hashval(&open->op_owner);
	oo = find_openstateowner_str(strhashval, open, clp);
	open->op_openowner = oo;
	if (!oo)
		goto new_owner;
	if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
		/* Replace unconfirmed owners without checking for replay. */
		release_openowner(oo);
		open->op_openowner = NULL;
		goto new_owner;
	}
	status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid);
	if (status)
		return status;
	goto alloc_stateid;
new_owner:
	oo = alloc_init_open_stateowner(strhashval, open, cstate);
	if (oo == NULL)
		return nfserr_jukebox;
	open->op_openowner = oo;
alloc_stateid:
	open->op_stp = nfs4_alloc_open_stateid(clp);
	if (!open->op_stp)
		return nfserr_jukebox;

	if (nfsd4_has_session(cstate) &&
	    (cstate->current_fh.fh_export->ex_flags & NFSEXP_PNFS)) {
		open->op_odstate = alloc_clnt_odstate(clp);
		if (!open->op_odstate)
			return nfserr_jukebox;
	}

	return nfs_ok;
}

static inline __be32
nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
{
	if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ))
		return nfserr_openmode;
	else
		return nfs_ok;
}

static int share_access_to_flags(u32 share_access)
{
	return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE;
}

static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, stateid_t *s)
{
	struct nfs4_stid *ret;

	ret = find_stateid_by_type(cl, s,
				NFS4_DELEG_STID|NFS4_REVOKED_DELEG_STID);
	if (!ret)
		return NULL;
	return delegstateid(ret);
}

static bool nfsd4_is_deleg_cur(struct nfsd4_open *open)
{
	return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR ||
		open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH;
}

static __be32
nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open,
		struct nfs4_delegation **dp)
{
	int flags;
	__be32 status = nfserr_bad_stateid;
	struct nfs4_delegation *deleg;

	deleg = find_deleg_stateid(cl, &open->op_delegate_stateid);
	if (deleg == NULL)
		goto out;
	if (deleg->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID) {
		nfs4_put_stid(&deleg->dl_stid);
		if (cl->cl_minorversion)
			status = nfserr_deleg_revoked;
		goto out;
	}
	flags = share_access_to_flags(open->op_share_access);
	status = nfs4_check_delegmode(deleg, flags);
	if (status) {
		nfs4_put_stid(&deleg->dl_stid);
		goto out;
	}
	*dp = deleg;
out:
	if (!nfsd4_is_deleg_cur(open))
		return nfs_ok;
	if (status)
		return status;
	open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
	return nfs_ok;
}

static inline int nfs4_access_to_access(u32 nfs4_access)
{
	int flags = 0;

	if (nfs4_access & NFS4_SHARE_ACCESS_READ)
		flags |= NFSD_MAY_READ;
	if (nfs4_access & NFS4_SHARE_ACCESS_WRITE)
		flags |= NFSD_MAY_WRITE;
	return flags;
}
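/*
 * E.g. NFS4_SHARE_ACCESS_BOTH is the READ and WRITE bits together, so it
 * maps to NFSD_MAY_READ|NFSD_MAY_WRITE here.
 */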
static inline __be32
nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
		struct nfsd4_open *open)
{
	struct iattr iattr = {
		.ia_valid = ATTR_SIZE,
		.ia_size = 0,
	};
	if (!open->op_truncate)
		return 0;
	if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
		return nfserr_inval;
	return nfsd_setattr(rqstp, fh, &iattr, 0, (time64_t)0);
}

static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
		struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp,
		struct nfsd4_open *open)
{
	struct nfsd_file *nf = NULL;
	__be32 status;
	int oflag = nfs4_access_to_omode(open->op_share_access);
	int access = nfs4_access_to_access(open->op_share_access);
	unsigned char old_access_bmap, old_deny_bmap;

	spin_lock(&fp->fi_lock);

	/*
	 * Are we trying to set a deny mode that would conflict with
	 * current access?
	 */
	status = nfs4_file_check_deny(fp, open->op_share_deny);
	if (status != nfs_ok) {
		spin_unlock(&fp->fi_lock);
		goto out;
	}

	/* set access to the file */
	status = nfs4_file_get_access(fp, open->op_share_access);
	if (status != nfs_ok) {
		spin_unlock(&fp->fi_lock);
		goto out;
	}

	/* Set access bits in stateid */
	old_access_bmap = stp->st_access_bmap;
	set_access(open->op_share_access, stp);

	/* Set new deny mask */
	old_deny_bmap = stp->st_deny_bmap;
	set_deny(open->op_share_deny, stp);
	fp->fi_share_deny |= (open->op_share_deny & NFS4_SHARE_DENY_BOTH);

	if (!fp->fi_fds[oflag]) {
		spin_unlock(&fp->fi_lock);
		status = nfsd_file_acquire(rqstp, cur_fh, access, &nf);
		if (status)
			goto out_put_access;
		spin_lock(&fp->fi_lock);
		if (!fp->fi_fds[oflag]) {
			fp->fi_fds[oflag] = nf;
			nf = NULL;
		}
	}
	spin_unlock(&fp->fi_lock);
	if (nf)
		nfsd_file_put(nf);

	status = nfsd4_truncate(rqstp, cur_fh, open);
	if (status)
		goto out_put_access;
out:
	return status;
out_put_access:
	stp->st_access_bmap = old_access_bmap;
	nfs4_file_put_access(fp, open->op_share_access);
	reset_union_bmap_deny(bmap_to_share_mode(old_deny_bmap), stp);
	goto out;
}

static __be32
nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, struct nfsd4_open *open)
{
	__be32 status;
	unsigned char old_deny_bmap = stp->st_deny_bmap;

	if (!test_access(open->op_share_access, stp))
		return nfs4_get_vfs_file(rqstp, fp, cur_fh, stp, open);

	/* test and set deny mode */
	spin_lock(&fp->fi_lock);
	status = nfs4_file_check_deny(fp, open->op_share_deny);
	if (status == nfs_ok) {
		set_deny(open->op_share_deny, stp);
		fp->fi_share_deny |=
				(open->op_share_deny & NFS4_SHARE_DENY_BOTH);
	}
	spin_unlock(&fp->fi_lock);

	if (status != nfs_ok)
		return status;

	status = nfsd4_truncate(rqstp, cur_fh, open);
	if (status != nfs_ok)
		reset_union_bmap_deny(old_deny_bmap, stp);
	return status;
}

/* Should we give out recallable state?: */
static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
{
	if (clp->cl_cb_state == NFSD4_CB_UP)
		return true;
	/*
	 * In the sessions case, since we don't have to establish a
	 * separate connection for callbacks, we assume it's OK
	 * until we hear otherwise:
	 */
	return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
}

static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp,
						int flag)
{
	struct file_lock *fl;

	fl = locks_alloc_lock();
	if (!fl)
		return NULL;
	fl->fl_lmops = &nfsd_lease_mng_ops;
	fl->fl_flags = FL_DELEG;
	fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ ? F_RDLCK : F_WRLCK;
	fl->fl_end = OFFSET_MAX;
	fl->fl_owner = (fl_owner_t)dp;
	fl->fl_pid = current->tgid;
	fl->fl_file = dp->dl_stid.sc_file->fi_deleg_file->nf_file;
	return fl;
}

static int nfsd4_check_conflicting_opens(struct nfs4_client *clp,
					 struct nfs4_file *fp)
{
	struct nfs4_clnt_odstate *co;
	struct file *f = fp->fi_deleg_file->nf_file;
	struct inode *ino = locks_inode(f);
	int writes = atomic_read(&ino->i_writecount);

	if (fp->fi_fds[O_WRONLY])
		writes--;
	if (fp->fi_fds[O_RDWR])
		writes--;
	if (writes > 0)
		return -EAGAIN;
	spin_lock(&fp->fi_lock);
	list_for_each_entry(co, &fp->fi_clnt_odstate, co_perfile) {
		if (co->co_client != clp) {
			spin_unlock(&fp->fi_lock);
			return -EAGAIN;
		}
	}
	spin_unlock(&fp->fi_lock);
	return 0;
}
static struct nfs4_delegation *
nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
		    struct nfs4_file *fp, struct nfs4_clnt_odstate *odstate)
{
	int status = 0;
	struct nfs4_delegation *dp;
	struct nfsd_file *nf;
	struct file_lock *fl;

	/*
	 * The fi_had_conflict and nfs_get_existing_delegation checks
	 * here are just optimizations; we'll need to recheck them at
	 * the end:
	 */
	if (fp->fi_had_conflict)
		return ERR_PTR(-EAGAIN);

	nf = find_readable_file(fp);
	if (!nf) {
		/*
		 * We probably could attempt another open and get a read
		 * delegation, but for now, don't bother until the
		 * client actually sends us one.
		 */
		return ERR_PTR(-EAGAIN);
	}
	spin_lock(&state_lock);
	spin_lock(&fp->fi_lock);
	if (nfs4_delegation_exists(clp, fp))
		status = -EAGAIN;
	else if (!fp->fi_deleg_file) {
		fp->fi_deleg_file = nf;
		/* increment early to prevent fi_deleg_file from being
		 * cleared */
		fp->fi_delegees = 1;
		nf = NULL;
	} else
		fp->fi_delegees++;
	spin_unlock(&fp->fi_lock);
	spin_unlock(&state_lock);
	if (nf)
		nfsd_file_put(nf);
	if (status)
		return ERR_PTR(status);

	status = -ENOMEM;
	dp = alloc_init_deleg(clp, fp, fh, odstate);
	if (!dp)
		goto out_delegees;

	fl = nfs4_alloc_init_lease(dp, NFS4_OPEN_DELEGATE_READ);
	if (!fl)
		goto out_clnt_odstate;

	status = nfsd4_check_conflicting_opens(clp, fp);
	if (status) {
		locks_free_lock(fl);
		goto out_clnt_odstate;
	}
	status = vfs_setlease(fp->fi_deleg_file->nf_file, fl->fl_type, &fl, NULL);
	if (fl)
		locks_free_lock(fl);
	if (status)
		goto out_clnt_odstate;
	status = nfsd4_check_conflicting_opens(clp, fp);
	if (status)
		goto out_clnt_odstate;

	spin_lock(&state_lock);
	spin_lock(&fp->fi_lock);
	if (fp->fi_had_conflict)
		status = -EAGAIN;
	else
		status = hash_delegation_locked(dp, fp);
	spin_unlock(&fp->fi_lock);
	spin_unlock(&state_lock);

	if (status)
		goto out_unlock;

	return dp;
out_unlock:
	vfs_setlease(fp->fi_deleg_file->nf_file, F_UNLCK, NULL, (void **)&dp);
out_clnt_odstate:
	put_clnt_odstate(dp->dl_clnt_odstate);
	nfs4_put_stid(&dp->dl_stid);
out_delegees:
	put_deleg_file(fp);
	return ERR_PTR(status);
}

static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
{
	open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
	if (status == -EAGAIN)
		open->op_why_no_deleg = WND4_CONTENTION;
	else {
		open->op_why_no_deleg = WND4_RESOURCE;
		switch (open->op_deleg_want) {
		case NFS4_SHARE_WANT_READ_DELEG:
		case NFS4_SHARE_WANT_WRITE_DELEG:
		case NFS4_SHARE_WANT_ANY_DELEG:
			break;
		case NFS4_SHARE_WANT_CANCEL:
			open->op_why_no_deleg = WND4_CANCELLED;
			break;
		case NFS4_SHARE_WANT_NO_DELEG:
			WARN_ON_ONCE(1);
		}
	}
}

/*
 * Attempt to hand out a delegation.
 *
 * Note we don't support write delegations, and won't until the vfs has
 * proper support for them.
 */
static void
nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open,
			struct nfs4_ol_stateid *stp)
{
	struct nfs4_delegation *dp;
	struct nfs4_openowner *oo = openowner(stp->st_stateowner);
	struct nfs4_client *clp = stp->st_stid.sc_client;
	int cb_up;
	int status = 0;

	cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
	open->op_recall = 0;
	switch (open->op_claim_type) {
		case NFS4_OPEN_CLAIM_PREVIOUS:
			if (!cb_up)
				open->op_recall = 1;
			if (open->op_delegate_type != NFS4_OPEN_DELEGATE_READ)
				goto out_no_deleg;
			break;
		case NFS4_OPEN_CLAIM_NULL:
		case NFS4_OPEN_CLAIM_FH:
			/*
			 * Let's not give out any delegations till everyone's
			 * had the chance to reclaim theirs, *and* until
			 * NLM locks have all been reclaimed:
			 */
			if (locks_in_grace(clp->net))
				goto out_no_deleg;
			if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED))
				goto out_no_deleg;
			break;
		default:
			goto out_no_deleg;
	}
	dp = nfs4_set_delegation(clp, fh, stp->st_stid.sc_file, stp->st_clnt_odstate);
	if (IS_ERR(dp))
		goto out_no_deleg;

	memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));

	trace_nfsd_deleg_read(&dp->dl_stid.sc_stateid);
	open->op_delegate_type = NFS4_OPEN_DELEGATE_READ;
	nfs4_put_stid(&dp->dl_stid);
	return;
out_no_deleg:
	open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE;
	if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS &&
	    open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) {
		dprintk("NFSD: WARNING: refusing delegation reclaim\n");
		open->op_recall = 1;
	}

	/* 4.1 client asking for a delegation? */
	if (open->op_deleg_want)
		nfsd4_open_deleg_none_ext(open, status);
	return;
}

static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open,
					struct nfs4_delegation *dp)
{
	if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG &&
	    dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
		open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
		open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE;
	} else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG &&
		   dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
		open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
		open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE;
	}
	/* Otherwise the client must be confused wanting a delegation
	 * it already has, therefore we don't return
	 * NFS4_OPEN_DELEGATE_NONE_EXT and reason.
	 */
}
__be32
nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
{
	struct nfsd4_compoundres *resp = rqstp->rq_resp;
	struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
	struct nfs4_file *fp = NULL;
	struct nfs4_ol_stateid *stp = NULL;
	struct nfs4_delegation *dp = NULL;
	__be32 status;
	bool new_stp = false;

	/*
	 * Lookup file; if found, lookup stateid and check open request,
	 * and check for delegations in the process of being recalled.
	 * If not found, create the nfs4_file struct
	 */
	fp = find_or_add_file(open->op_file, &current_fh->fh_handle);
	if (fp != open->op_file) {
		status = nfs4_check_deleg(cl, open, &dp);
		if (status)
			goto out;
		stp = nfsd4_find_and_lock_existing_open(fp, open);
	} else {
		open->op_file = NULL;
		status = nfserr_bad_stateid;
		if (nfsd4_is_deleg_cur(open))
			goto out;
	}

	if (!stp) {
		stp = init_open_stateid(fp, open);
		if (!open->op_stp)
			new_stp = true;
	}

	/*
	 * OPEN the file, or upgrade an existing OPEN.
	 * If truncate fails, the OPEN fails.
	 *
	 * stp is already locked.
	 */
	if (!new_stp) {
		/* Stateid was found, this is an OPEN upgrade */
		status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
		if (status) {
			mutex_unlock(&stp->st_mutex);
			goto out;
		}
	} else {
		status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
		if (status) {
			stp->st_stid.sc_type = NFS4_CLOSED_STID;
			release_open_stateid(stp);
			mutex_unlock(&stp->st_mutex);
			goto out;
		}

		stp->st_clnt_odstate = find_or_hash_clnt_odstate(fp,
							open->op_odstate);
		if (stp->st_clnt_odstate == open->op_odstate)
			open->op_odstate = NULL;
	}

	nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid);
	mutex_unlock(&stp->st_mutex);

	if (nfsd4_has_session(&resp->cstate)) {
		if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
			open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
			open->op_why_no_deleg = WND4_NOT_WANTED;
			goto nodeleg;
		}
	}

	/*
	 * Attempt to hand out a delegation. No error return, because the
	 * OPEN succeeds even if we fail.
	 */
	nfs4_open_delegation(current_fh, open, stp);
nodeleg:
	status = nfs_ok;
	trace_nfsd_open(&stp->st_stid.sc_stateid);
out:
	/* 4.1 client trying to upgrade/downgrade delegation? */
	if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp &&
	    open->op_deleg_want)
		nfsd4_deleg_xgrade_none_ext(open, dp);

	if (fp)
		put_nfs4_file(fp);
	if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
		open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
	/*
	 * To finish the open response, we just need to set the rflags.
	 */
	open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX;
	if (nfsd4_has_session(&resp->cstate))
		open->op_rflags |= NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK;
	else if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED))
		open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM;

	if (dp)
		nfs4_put_stid(&dp->dl_stid);
	if (stp)
		nfs4_put_stid(&stp->st_stid);

	return status;
}
void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate,
			      struct nfsd4_open *open)
{
	if (open->op_openowner) {
		struct nfs4_stateowner *so = &open->op_openowner->oo_owner;

		nfsd4_cstate_assign_replay(cstate, so);
		nfs4_put_stateowner(so);
	}
	if (open->op_file)
		kmem_cache_free(file_slab, open->op_file);
	if (open->op_stp)
		nfs4_put_stid(&open->op_stp->st_stid);
	if (open->op_odstate)
		kmem_cache_free(odstate_slab, open->op_odstate);
}

__be32
nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	    union nfsd4_op_u *u)
{
	clientid_t *clid = &u->renew;
	struct nfs4_client *clp;
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	trace_nfsd_clid_renew(clid);
	status = lookup_clientid(clid, cstate, nn, false);
	if (status)
		goto out;
	clp = cstate->clp;
	status = nfserr_cb_path_down;
	if (!list_empty(&clp->cl_delegations)
			&& clp->cl_cb_state != NFSD4_CB_UP)
		goto out;
	status = nfs_ok;
out:
	return status;
}

void
nfsd4_end_grace(struct nfsd_net *nn)
{
	/* do nothing if grace period already ended */
	if (nn->grace_ended)
		return;

	trace_nfsd_grace_complete(nn);
	nn->grace_ended = true;
	/*
	 * If the server goes down again right now, an NFSv4
	 * client will still be allowed to reclaim after it comes back up,
	 * even if it hasn't yet had a chance to reclaim state this time.
	 */
	nfsd4_record_grace_done(nn);
	/*
	 * At this point, NFSv4 clients can still reclaim. But if the
	 * server crashes, any that have not yet reclaimed will be out
	 * of luck on the next boot.
	 *
	 * (NFSv4.1+ clients are considered to have reclaimed once they
	 * call RECLAIM_COMPLETE. NFSv4.0 clients are considered to
	 * have reclaimed after their first OPEN.)
	 */
	locks_end_grace(&nn->nfsd4_manager);
	/*
	 * At this point, and once lockd and/or any other containers
	 * exit their grace period, further reclaims will fail and
	 * regular locking can resume.
	 */
}

/*
 * If we've waited a lease period but there are still clients trying to
 * reclaim, wait a little longer to give them a chance to finish.
 */
static bool clients_still_reclaiming(struct nfsd_net *nn)
{
	time64_t double_grace_period_end = nn->boot_time +
					   2 * nn->nfsd4_lease;

	if (nn->track_reclaim_completes &&
			atomic_read(&nn->nr_reclaim_complete) ==
			nn->reclaim_str_hashtbl_size)
		return false;
	if (!nn->somebody_reclaimed)
		return false;
	nn->somebody_reclaimed = false;
	/*
	 * If we've given them *two* lease times to reclaim, and they're
	 * still not done, give up:
	 */
	if (ktime_get_boottime_seconds() > double_grace_period_end)
		return false;
	return true;
}
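/*
 * Concretely: with e.g. the default 90 second lease, the grace period
 * keeps getting extended while reclaims are still arriving, but never
 * past boot_time + 180 seconds (two lease periods).
 */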
static time64_t
nfs4_laundromat(struct nfsd_net *nn)
{
	struct nfs4_client *clp;
	struct nfs4_openowner *oo;
	struct nfs4_delegation *dp;
	struct nfs4_ol_stateid *stp;
	struct nfsd4_blocked_lock *nbl;
	struct list_head *pos, *next, reaplist;
	time64_t cutoff = ktime_get_boottime_seconds() - nn->nfsd4_lease;
	time64_t t, new_timeo = nn->nfsd4_lease;
	struct nfs4_cpntf_state *cps;
	copy_stateid_t *cps_t;
	int i;

	if (clients_still_reclaiming(nn)) {
		new_timeo = 0;
		goto out;
	}
	nfsd4_end_grace(nn);
	INIT_LIST_HEAD(&reaplist);

	spin_lock(&nn->s2s_cp_lock);
	idr_for_each_entry(&nn->s2s_cp_stateids, cps_t, i) {
		cps = container_of(cps_t, struct nfs4_cpntf_state, cp_stateid);
		if (cps->cp_stateid.sc_type == NFS4_COPYNOTIFY_STID &&
				cps->cpntf_time > cutoff)
			_free_cpntf_state_locked(nn, cps);
	}
	spin_unlock(&nn->s2s_cp_lock);

	spin_lock(&nn->client_lock);
	list_for_each_safe(pos, next, &nn->client_lru) {
		clp = list_entry(pos, struct nfs4_client, cl_lru);
		if (clp->cl_time > cutoff) {
			t = clp->cl_time - cutoff;
			new_timeo = min(new_timeo, t);
			break;
		}
		if (mark_client_expired_locked(clp)) {
			trace_nfsd_clid_expired(&clp->cl_clientid);
			continue;
		}
		list_add(&clp->cl_lru, &reaplist);
	}
	spin_unlock(&nn->client_lock);
	list_for_each_safe(pos, next, &reaplist) {
		clp = list_entry(pos, struct nfs4_client, cl_lru);
		trace_nfsd_clid_purged(&clp->cl_clientid);
		list_del_init(&clp->cl_lru);
		expire_client(clp);
	}
	spin_lock(&state_lock);
	list_for_each_safe(pos, next, &nn->del_recall_lru) {
		dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
		if (dp->dl_time > cutoff) {
			t = dp->dl_time - cutoff;
			new_timeo = min(new_timeo, t);
			break;
		}
		WARN_ON(!unhash_delegation_locked(dp));
		list_add(&dp->dl_recall_lru, &reaplist);
	}
	spin_unlock(&state_lock);
	while (!list_empty(&reaplist)) {
		dp = list_first_entry(&reaplist, struct nfs4_delegation,
					dl_recall_lru);
		list_del_init(&dp->dl_recall_lru);
		revoke_delegation(dp);
	}

	spin_lock(&nn->client_lock);
	while (!list_empty(&nn->close_lru)) {
		oo = list_first_entry(&nn->close_lru, struct nfs4_openowner,
					oo_close_lru);
		if (oo->oo_time > cutoff) {
			t = oo->oo_time - cutoff;
			new_timeo = min(new_timeo, t);
			break;
		}
		list_del_init(&oo->oo_close_lru);
		stp = oo->oo_last_closed_stid;
		oo->oo_last_closed_stid = NULL;
		spin_unlock(&nn->client_lock);
		nfs4_put_stid(&stp->st_stid);
		spin_lock(&nn->client_lock);
	}
	spin_unlock(&nn->client_lock);

	/*
	 * It's possible for a client to try and acquire an already held lock
	 * that is being held for a long time, and then lose interest in it.
	 * So, we clean out any un-revisited request after a lease period
	 * under the assumption that the client is no longer interested.
	 *
	 * RFC5661, sec. 9.6 states that the client must not rely on getting
	 * notifications and must continue to poll for locks, even when the
	 * server supports them. Thus this shouldn't lead to clients blocking
	 * indefinitely once the lock does become free.
	 */
	BUG_ON(!list_empty(&reaplist));
	spin_lock(&nn->blocked_locks_lock);
	while (!list_empty(&nn->blocked_locks_lru)) {
		nbl = list_first_entry(&nn->blocked_locks_lru,
					struct nfsd4_blocked_lock, nbl_lru);
		if (nbl->nbl_time > cutoff) {
			t = nbl->nbl_time - cutoff;
			new_timeo = min(new_timeo, t);
			break;
		}
		list_move(&nbl->nbl_lru, &reaplist);
		list_del_init(&nbl->nbl_list);
	}
	spin_unlock(&nn->blocked_locks_lock);

	while (!list_empty(&reaplist)) {
		nbl = list_first_entry(&reaplist,
					struct nfsd4_blocked_lock, nbl_lru);
		list_del_init(&nbl->nbl_lru);
		free_blocked_lock(nbl);
	}
out:
	new_timeo = max_t(time64_t, new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT);
	return new_timeo;
}
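/*
 * The value returned above is the number of seconds until the laundromat
 * should run again: the smallest time-to-expiry observed during the scan,
 * clamped below by NFSD_LAUNDROMAT_MINTIMEOUT so that state expiring just
 * after a pass cannot force continuous rescanning.
 */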
static struct workqueue_struct *laundry_wq;
static void laundromat_main(struct work_struct *);

static void
laundromat_main(struct work_struct *laundry)
{
	time64_t t;
	struct delayed_work *dwork = to_delayed_work(laundry);
	struct nfsd_net *nn = container_of(dwork, struct nfsd_net,
					   laundromat_work);

	t = nfs4_laundromat(nn);
	queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
}

static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp)
{
	if (!fh_match(&fhp->fh_handle, &stp->sc_file->fi_fhandle))
		return nfserr_bad_stateid;
	return nfs_ok;
}

static int
access_permit_read(struct nfs4_ol_stateid *stp)
{
	return test_access(NFS4_SHARE_ACCESS_READ, stp) ||
		test_access(NFS4_SHARE_ACCESS_BOTH, stp) ||
		test_access(NFS4_SHARE_ACCESS_WRITE, stp);
}

static int
access_permit_write(struct nfs4_ol_stateid *stp)
{
	return test_access(NFS4_SHARE_ACCESS_WRITE, stp) ||
		test_access(NFS4_SHARE_ACCESS_BOTH, stp);
}
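/*
 * Note that access_permit_read() deliberately also succeeds for
 * write-only stateids: the NFSv4 specs are generally read as permitting
 * a server to allow READ on an open made for WRITE only.
 */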
static __be32
nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
{
	__be32 status = nfserr_openmode;

	/* For lock stateid's, we test the parent open, not the lock: */
	if (stp->st_openstp)
		stp = stp->st_openstp;
	if ((flags & WR_STATE) && !access_permit_write(stp))
		goto out;
	if ((flags & RD_STATE) && !access_permit_read(stp))
		goto out;
	status = nfs_ok;
out:
	return status;
}

static inline __be32
check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid, int flags)
{
	if (ONE_STATEID(stateid) && (flags & RD_STATE))
		return nfs_ok;
	else if (opens_in_grace(net)) {
		/* Answer in remaining cases depends on existence of
		 * conflicting state; so we must wait out the grace period. */
		return nfserr_grace;
	} else if (flags & WR_STATE)
		return nfs4_share_conflict(current_fh,
				NFS4_SHARE_DENY_WRITE);
	else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */
		return nfs4_share_conflict(current_fh,
				NFS4_SHARE_DENY_READ);
}

/*
 * Allow READ/WRITE during grace period on recovered state only for files
 * that are not able to provide mandatory locking.
 */
static inline int
grace_disallows_io(struct net *net, struct inode *inode)
{
	return opens_in_grace(net) && mandatory_lock(inode);
}

static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
{
	/*
	 * When sessions are used the stateid generation number is ignored
	 * when it is zero.
	 */
	if (has_session && in->si_generation == 0)
		return nfs_ok;

	if (in->si_generation == ref->si_generation)
		return nfs_ok;

	/* If the client sends us a stateid from the future, it's buggy: */
	if (nfsd4_stateid_generation_after(in, ref))
		return nfserr_bad_stateid;
	/*
	 * However, we could see a stateid from the past, even from a
	 * non-buggy client. For example, if the client sends a lock
	 * while some IO is outstanding, the lock may bump si_generation
	 * while the IO is still in flight. The client could avoid that
	 * situation by waiting for responses on all the IO requests,
	 * but better performance may result in retrying IO that
	 * receives an old_stateid error if requests are rarely
	 * reordered in flight:
	 */
	return nfserr_old_stateid;
}
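/*
 * Example: if our copy of the stateid has si_generation 5, an incoming 5
 * matches, 6 or more is a stateid "from the future" (buggy client,
 * nfserr_bad_stateid), and 4 or less is a legitimately reordered old
 * request (nfserr_old_stateid). Generation 0 on a session means
 * "whatever the current generation is".
 */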
static __be32 nfsd4_stid_check_stateid_generation(stateid_t *in, struct nfs4_stid *s, bool has_session)
{
	__be32 ret;

	spin_lock(&s->sc_lock);
	ret = nfsd4_verify_open_stid(s);
	if (ret == nfs_ok)
		ret = check_stateid_generation(in, &s->sc_stateid, has_session);
	spin_unlock(&s->sc_lock);
	return ret;
}

static __be32 nfsd4_check_openowner_confirmed(struct nfs4_ol_stateid *ols)
{
	if (ols->st_stateowner->so_is_open_owner &&
	    !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
		return nfserr_bad_stateid;
	return nfs_ok;
}

static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
{
	struct nfs4_stid *s;
	__be32 status = nfserr_bad_stateid;

	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
		CLOSE_STATEID(stateid))
		return status;
	if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid))
		return status;
	spin_lock(&cl->cl_lock);
	s = find_stateid_locked(cl, stateid);
	if (!s)
		goto out_unlock;
	status = nfsd4_stid_check_stateid_generation(stateid, s, 1);
	if (status)
		goto out_unlock;
	switch (s->sc_type) {
	case NFS4_DELEG_STID:
		status = nfs_ok;
		break;
	case NFS4_REVOKED_DELEG_STID:
		status = nfserr_deleg_revoked;
		break;
	case NFS4_OPEN_STID:
	case NFS4_LOCK_STID:
		status = nfsd4_check_openowner_confirmed(openlockstateid(s));
		break;
	default:
		printk("unknown stateid type %x\n", s->sc_type);
		fallthrough;
	case NFS4_CLOSED_STID:
	case NFS4_CLOSED_DELEG_STID:
		status = nfserr_bad_stateid;
	}
out_unlock:
	spin_unlock(&cl->cl_lock);
	return status;
}

__be32
nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
		     stateid_t *stateid, unsigned char typemask,
		     struct nfs4_stid **s, struct nfsd_net *nn)
{
	__be32 status;
	bool return_revoked = false;

	/*
	 * only return revoked delegations if explicitly asked.
	 * otherwise we report revoked or bad_stateid status.
	 */
	if (typemask & NFS4_REVOKED_DELEG_STID)
		return_revoked = true;
	else if (typemask & NFS4_DELEG_STID)
		typemask |= NFS4_REVOKED_DELEG_STID;

	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
		CLOSE_STATEID(stateid))
		return nfserr_bad_stateid;
	status = lookup_clientid(&stateid->si_opaque.so_clid, cstate, nn,
				 false);
	if (status == nfserr_stale_clientid) {
		if (cstate->session)
			return nfserr_bad_stateid;
		return nfserr_stale_stateid;
	}
	if (status)
		return status;
	*s = find_stateid_by_type(cstate->clp, stateid, typemask);
	if (!*s)
		return nfserr_bad_stateid;
	if (((*s)->sc_type == NFS4_REVOKED_DELEG_STID) && !return_revoked) {
		nfs4_put_stid(*s);
		if (cstate->minorversion)
			return nfserr_deleg_revoked;
		return nfserr_bad_stateid;
	}
	return nfs_ok;
}
static struct nfsd_file *
nfs4_find_file(struct nfs4_stid *s, int flags)
{
	if (!s->sc_file)
		return NULL;

	switch (s->sc_type) {
	case NFS4_DELEG_STID:
		if (WARN_ON_ONCE(!s->sc_file->fi_deleg_file))
			return NULL;
		return nfsd_file_get(s->sc_file->fi_deleg_file);
	case NFS4_OPEN_STID:
	case NFS4_LOCK_STID:
		if (flags & RD_STATE)
			return find_readable_file(s->sc_file);
		else
			return find_writeable_file(s->sc_file);
	}

	return NULL;
}

static __be32
nfs4_check_olstateid(struct nfs4_ol_stateid *ols, int flags)
{
	__be32 status;

	status = nfsd4_check_openowner_confirmed(ols);
	if (status)
		return status;
	return nfs4_check_openmode(ols, flags);
}

static __be32
nfs4_check_file(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfs4_stid *s,
		struct nfsd_file **nfp, int flags)
{
	int acc = (flags & RD_STATE) ? NFSD_MAY_READ : NFSD_MAY_WRITE;
	struct nfsd_file *nf;
	__be32 status;

	nf = nfs4_find_file(s, flags);
	if (nf) {
		status = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry,
				acc | NFSD_MAY_OWNER_OVERRIDE);
		if (status) {
			nfsd_file_put(nf);
			goto out;
		}
	} else {
		status = nfsd_file_acquire(rqstp, fhp, acc, &nf);
		if (status)
			return status;
	}
	*nfp = nf;
out:
	return status;
}

static void
_free_cpntf_state_locked(struct nfsd_net *nn, struct nfs4_cpntf_state *cps)
{
	WARN_ON_ONCE(cps->cp_stateid.sc_type != NFS4_COPYNOTIFY_STID);
	if (!refcount_dec_and_test(&cps->cp_stateid.sc_count))
		return;
	list_del(&cps->cp_list);
	idr_remove(&nn->s2s_cp_stateids,
		   cps->cp_stateid.stid.si_opaque.so_id);
	kfree(cps);
}

/*
 * A READ from an inter server to server COPY will have a
 * copy stateid. Look up the copy notify stateid from the
 * idr structure and take a reference on it.
 */
__be32 manage_cpntf_state(struct nfsd_net *nn, stateid_t *st,
			  struct nfs4_client *clp,
			  struct nfs4_cpntf_state **cps)
{
	copy_stateid_t *cps_t;
	struct nfs4_cpntf_state *state = NULL;

	if (st->si_opaque.so_clid.cl_id != nn->s2s_cp_cl_id)
		return nfserr_bad_stateid;
	spin_lock(&nn->s2s_cp_lock);
	cps_t = idr_find(&nn->s2s_cp_stateids, st->si_opaque.so_id);
	if (cps_t) {
		state = container_of(cps_t, struct nfs4_cpntf_state,
				     cp_stateid);
		if (state->cp_stateid.sc_type != NFS4_COPYNOTIFY_STID) {
			state = NULL;
			goto unlock;
		}
		if (!clp)
			refcount_inc(&state->cp_stateid.sc_count);
		else
			_free_cpntf_state_locked(nn, state);
	}
unlock:
	spin_unlock(&nn->s2s_cp_lock);
	if (!state)
		return nfserr_bad_stateid;
	if (!clp && state)
		*cps = state;
	return 0;
}

static __be32 find_cpntf_state(struct nfsd_net *nn, stateid_t *st,
			       struct nfs4_stid **stid)
{
	__be32 status;
	struct nfs4_cpntf_state *cps = NULL;
	struct nfsd4_compound_state cstate;

	status = manage_cpntf_state(nn, st, NULL, &cps);
	if (status)
		return status;

	cps->cpntf_time = ktime_get_boottime_seconds();
	memset(&cstate, 0, sizeof(cstate));
	status = lookup_clientid(&cps->cp_p_clid, &cstate, nn, true);
	if (status)
		goto out;
	status = nfsd4_lookup_stateid(&cstate, &cps->cp_p_stateid,
				NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID,
				stid, nn);
	put_client_renew(cstate.clp);
out:
	nfs4_put_cpntf_state(nn, cps);
	return status;
}

void nfs4_put_cpntf_state(struct nfsd_net *nn, struct nfs4_cpntf_state *cps)
{
	spin_lock(&nn->s2s_cp_lock);
	_free_cpntf_state_locked(nn, cps);
	spin_unlock(&nn->s2s_cp_lock);
}
/*
 * Checks for stateid operations
 */
__be32
nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate, struct svc_fh *fhp,
		stateid_t *stateid, int flags, struct nfsd_file **nfp,
		struct nfs4_stid **cstid)
{
	struct inode *ino = d_inode(fhp->fh_dentry);
	struct net *net = SVC_NET(rqstp);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	struct nfs4_stid *s = NULL;
	__be32 status;

	if (nfp)
		*nfp = NULL;

	if (grace_disallows_io(net, ino))
		return nfserr_grace;

	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) {
		status = check_special_stateids(net, fhp, stateid, flags);
		goto done;
	}

	status = nfsd4_lookup_stateid(cstate, stateid,
				NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID,
				&s, nn);
	if (status == nfserr_bad_stateid)
		status = find_cpntf_state(nn, stateid, &s);
	if (status)
		return status;
	status = nfsd4_stid_check_stateid_generation(stateid, s,
			nfsd4_has_session(cstate));
	if (status)
		goto out;

	switch (s->sc_type) {
	case NFS4_DELEG_STID:
		status = nfs4_check_delegmode(delegstateid(s), flags);
		break;
	case NFS4_OPEN_STID:
	case NFS4_LOCK_STID:
		status = nfs4_check_olstateid(openlockstateid(s), flags);
		break;
	default:
		status = nfserr_bad_stateid;
		break;
	}
	if (status)
		goto out;
	status = nfs4_check_fh(fhp, s);

done:
	if (status == nfs_ok && nfp)
		status = nfs4_check_file(rqstp, fhp, s, nfp, flags);
out:
	if (s) {
		if (!status && cstid)
			*cstid = s;
		else
			nfs4_put_stid(s);
	}
	return status;
}

/*
 * Test if the stateid is valid
 */
__be32
nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		   union nfsd4_op_u *u)
{
	struct nfsd4_test_stateid *test_stateid = &u->test_stateid;
	struct nfsd4_test_stateid_id *stateid;
	struct nfs4_client *cl = cstate->session->se_client;

	list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list)
		stateid->ts_id_status =
			nfsd4_validate_stateid(cl, &stateid->ts_id_stateid);

	return nfs_ok;
}

static __be32
nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s)
{
	struct nfs4_ol_stateid *stp = openlockstateid(s);
	__be32 ret;

	ret = nfsd4_lock_ol_stateid(stp);
	if (ret)
		goto out_put_stid;

	ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
	if (ret)
		goto out;

	ret = nfserr_locks_held;
	if (check_for_locks(stp->st_stid.sc_file,
			    lockowner(stp->st_stateowner)))
		goto out;

	release_lock_stateid(stp);
	ret = nfs_ok;

out:
	mutex_unlock(&stp->st_mutex);
out_put_stid:
	nfs4_put_stid(s);
	return ret;
}

__be32
nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		   union nfsd4_op_u *u)
{
	struct nfsd4_free_stateid *free_stateid = &u->free_stateid;
	stateid_t *stateid = &free_stateid->fr_stateid;
	struct nfs4_stid *s;
	struct nfs4_delegation *dp;
	struct nfs4_client *cl = cstate->session->se_client;
	__be32 ret = nfserr_bad_stateid;

	spin_lock(&cl->cl_lock);
	s = find_stateid_locked(cl, stateid);
	if (!s)
		goto out_unlock;
	spin_lock(&s->sc_lock);
	switch (s->sc_type) {
	case NFS4_DELEG_STID:
		ret = nfserr_locks_held;
		break;
	case NFS4_OPEN_STID:
		ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
		if (ret)
			break;
		ret = nfserr_locks_held;
		break;
	case NFS4_LOCK_STID:
		spin_unlock(&s->sc_lock);
		refcount_inc(&s->sc_count);
		spin_unlock(&cl->cl_lock);
		ret = nfsd4_free_lock_stateid(stateid, s);
		goto out;
	case NFS4_REVOKED_DELEG_STID:
		spin_unlock(&s->sc_lock);
		dp = delegstateid(s);
		list_del_init(&dp->dl_recall_lru);
		spin_unlock(&cl->cl_lock);
		nfs4_put_stid(s);
		ret = nfs_ok;
		goto out;
	/* Default falls through and returns nfserr_bad_stateid */
	}
	spin_unlock(&s->sc_lock);
out_unlock:
	spin_unlock(&cl->cl_lock);
out:
	return ret;
}
static inline int
setlkflg(int type)
{
	return (type == NFS4_READW_LT || type == NFS4_READ_LT) ?
		RD_STATE : WR_STATE;
}

static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp)
{
	struct svc_fh *current_fh = &cstate->current_fh;
	struct nfs4_stateowner *sop = stp->st_stateowner;
	__be32 status;

	status = nfsd4_check_seqid(cstate, sop, seqid);
	if (status)
		return status;
	status = nfsd4_lock_ol_stateid(stp);
	if (status != nfs_ok)
		return status;
	status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
	if (status == nfs_ok)
		status = nfs4_check_fh(current_fh, &stp->st_stid);
	if (status != nfs_ok)
		mutex_unlock(&stp->st_mutex);
	return status;
}

/*
 * Checks for sequence id mutating operations.
 */
static __be32
nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
			 stateid_t *stateid, char typemask,
			 struct nfs4_ol_stateid **stpp,
			 struct nfsd_net *nn)
{
	__be32 status;
	struct nfs4_stid *s;
	struct nfs4_ol_stateid *stp = NULL;

	trace_nfsd_preprocess(seqid, stateid);

	*stpp = NULL;
	status = nfsd4_lookup_stateid(cstate, stateid, typemask, &s, nn);
	if (status)
		return status;
	stp = openlockstateid(s);
	nfsd4_cstate_assign_replay(cstate, stp->st_stateowner);

	status = nfs4_seqid_op_checks(cstate, stateid, seqid, stp);
	if (!status)
		*stpp = stp;
	else
		nfs4_put_stid(&stp->st_stid);
	return status;
}

static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
			stateid_t *stateid, struct nfs4_ol_stateid **stpp, struct nfsd_net *nn)
{
	__be32 status;
	struct nfs4_openowner *oo;
	struct nfs4_ol_stateid *stp;

	status = nfs4_preprocess_seqid_op(cstate, seqid, stateid,
						NFS4_OPEN_STID, &stp, nn);
	if (status)
		return status;
	oo = openowner(stp->st_stateowner);
	if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
		mutex_unlock(&stp->st_mutex);
		nfs4_put_stid(&stp->st_stid);
		return nfserr_bad_stateid;
	}
	*stpp = stp;
	return nfs_ok;
}

__be32
nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		   union nfsd4_op_u *u)
{
	struct nfsd4_open_confirm *oc = &u->open_confirm;
	__be32 status;
	struct nfs4_openowner *oo;
	struct nfs4_ol_stateid *stp;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	dprintk("NFSD: nfsd4_open_confirm on file %pd\n",
			cstate->current_fh.fh_dentry);

	status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
	if (status)
		return status;

	status = nfs4_preprocess_seqid_op(cstate,
					oc->oc_seqid, &oc->oc_req_stateid,
					NFS4_OPEN_STID, &stp, nn);
	if (status)
		goto out;
	oo = openowner(stp->st_stateowner);
	status = nfserr_bad_stateid;
	if (oo->oo_flags & NFS4_OO_CONFIRMED) {
		mutex_unlock(&stp->st_mutex);
		goto put_stateid;
	}
	oo->oo_flags |= NFS4_OO_CONFIRMED;
	nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid);
	mutex_unlock(&stp->st_mutex);
	trace_nfsd_open_confirm(oc->oc_seqid, &stp->st_stid.sc_stateid);
	nfsd4_client_record_create(oo->oo_owner.so_client);
	status = nfs_ok;
put_stateid:
	nfs4_put_stid(&stp->st_stid);
out:
	nfsd4_bump_seqid(cstate, status);
	return status;
}

static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access)
{
	if (!test_access(access, stp))
		return;
	nfs4_file_put_access(stp->st_stid.sc_file, access);
	clear_access(access, stp);
}

static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access)
{
	switch (to_access) {
	case NFS4_SHARE_ACCESS_READ:
		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_WRITE);
		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
		break;
	case NFS4_SHARE_ACCESS_WRITE:
		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_READ);
		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
		break;
	case NFS4_SHARE_ACCESS_BOTH:
		break;
	default:
		WARN_ON_ONCE(1);
	}
}
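/*
 * So a downgrade to NFS4_SHARE_ACCESS_READ clears the WRITE and BOTH
 * bits (dropping the corresponding nfs4_file access references), a
 * downgrade to WRITE clears READ and BOTH, and a "downgrade" to BOTH is
 * a no-op; anything else is a caller bug.
 */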
__be32
nfsd4_open_downgrade(struct svc_rqst *rqstp,
		     struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
{
	struct nfsd4_open_downgrade *od = &u->open_downgrade;
	__be32 status;
	struct nfs4_ol_stateid *stp;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	dprintk("NFSD: nfsd4_open_downgrade on file %pd\n",
			cstate->current_fh.fh_dentry);

	/* We don't yet support WANT bits: */
	if (od->od_deleg_want)
		dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__,
			od->od_deleg_want);

	status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid,
					&od->od_stateid, &stp, nn);
	if (status)
		goto out;
	status = nfserr_inval;
	if (!test_access(od->od_share_access, stp)) {
		dprintk("NFSD: access not a subset of current bitmap: 0x%hhx, input access=%08x\n",
			stp->st_access_bmap, od->od_share_access);
		goto put_stateid;
	}
	if (!test_deny(od->od_share_deny, stp)) {
		dprintk("NFSD: deny not a subset of current bitmap: 0x%hhx, input deny=%08x\n",
			stp->st_deny_bmap, od->od_share_deny);
		goto put_stateid;
	}
	nfs4_stateid_downgrade(stp, od->od_share_access);
	reset_union_bmap_deny(od->od_share_deny, stp);
	nfs4_inc_and_copy_stateid(&od->od_stateid, &stp->st_stid);
	status = nfs_ok;
put_stateid:
	mutex_unlock(&stp->st_mutex);
	nfs4_put_stid(&stp->st_stid);
out:
	nfsd4_bump_seqid(cstate, status);
	return status;
}

static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
{
	struct nfs4_client *clp = s->st_stid.sc_client;
	bool unhashed;
	LIST_HEAD(reaplist);

	spin_lock(&clp->cl_lock);
	unhashed = unhash_open_stateid(s, &reaplist);

	if (clp->cl_minorversion) {
		if (unhashed)
			put_ol_stateid_locked(s, &reaplist);
		spin_unlock(&clp->cl_lock);
		free_ol_stateid_reaplist(&reaplist);
	} else {
		spin_unlock(&clp->cl_lock);
		free_ol_stateid_reaplist(&reaplist);
		if (unhashed)
			move_to_close_lru(s, clp->net);
	}
}

/*
 * nfs4_unlock_state() called after encode
 */
__be32
nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	struct nfsd4_close *close = &u->close;
	__be32 status;
	struct nfs4_ol_stateid *stp;
	struct net *net = SVC_NET(rqstp);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	dprintk("NFSD: nfsd4_close on file %pd\n",
			cstate->current_fh.fh_dentry);

	status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid,
					&close->cl_stateid,
					NFS4_OPEN_STID|NFS4_CLOSED_STID,
					&stp, nn);
	nfsd4_bump_seqid(cstate, status);
	if (status)
		goto out;

	stp->st_stid.sc_type = NFS4_CLOSED_STID;

	/*
	 * Technically we don't _really_ have to increment or copy it, since
	 * it should just be gone after this operation and we clobber the
	 * copied value below, but we continue to do so here just to ensure
	 * that racing ops see that there was a state change.
	 */
	nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);

	nfsd4_close_open_stateid(stp);
	mutex_unlock(&stp->st_mutex);

	/* v4.1+ suggests that we send a special stateid in here, since the
	 * clients should just ignore this anyway. Since this is not useful
	 * for v4.0 clients either, we set it to the special close_stateid
	 * universally.
	 *
	 * See RFC5661 section 18.2.4, and RFC7530 section 16.2.5
	 */
	memcpy(&close->cl_stateid, &close_stateid, sizeof(close->cl_stateid));

	/* put reference from nfs4_preprocess_seqid_op */
	nfs4_put_stid(&stp->st_stid);
out:
	return status;
}

__be32
nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		  union nfsd4_op_u *u)
{
	struct nfsd4_delegreturn *dr = &u->delegreturn;
	struct nfs4_delegation *dp;
	stateid_t *stateid = &dr->dr_stateid;
	struct nfs4_stid *s;
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
		return status;

	status = nfsd4_lookup_stateid(cstate, stateid, NFS4_DELEG_STID, &s, nn);
	if (status)
		goto out;
	dp = delegstateid(s);
	status = nfsd4_stid_check_stateid_generation(stateid, &dp->dl_stid, nfsd4_has_session(cstate));
	if (status)
		goto put_stateid;

	destroy_delegation(dp);
put_stateid:
	nfs4_put_stid(&dp->dl_stid);
out:
	return status;
}

static inline u64
end_offset(u64 start, u64 len)
{
	u64 end;

	end = start + len;
	return end >= start ? end : NFS4_MAX_UINT64;
}

/* last octet in a range */
static inline u64
last_byte_offset(u64 start, u64 len)
{
	u64 end;

	WARN_ON_ONCE(!len);
	end = start + len;
	return end > start ? end - 1 : NFS4_MAX_UINT64;
}
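/*
 * Examples: end_offset(100, 10) is 110 and last_byte_offset(100, 10) is
 * 109; if start + len would wrap past 2^64, both saturate to
 * NFS4_MAX_UINT64, which NFSv4 uses to mean "through the end of file".
 */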
/*
 * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that
 * we can't properly handle lock requests that go beyond the (2^63 - 1)-th
 * byte, because of sign extension problems. Since NFSv4 calls for 64-bit
 * locking, this prevents us from being completely protocol-compliant. The
 * real solution to this problem is to start using unsigned file offsets in
 * the VFS, but this is a very deep change!
 */
static inline void
nfs4_transform_lock_offset(struct file_lock *lock)
{
	if (lock->fl_start < 0)
		lock->fl_start = OFFSET_MAX;
	if (lock->fl_end < 0)
		lock->fl_end = OFFSET_MAX;
}

static fl_owner_t
nfsd4_fl_get_owner(fl_owner_t owner)
{
	struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;

	nfs4_get_stateowner(&lo->lo_owner);
	return owner;
}

static void
nfsd4_fl_put_owner(fl_owner_t owner)
{
	struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;

	if (lo)
		nfs4_put_stateowner(&lo->lo_owner);
}

static void
nfsd4_lm_notify(struct file_lock *fl)
{
	struct nfs4_lockowner		*lo = (struct nfs4_lockowner *)fl->fl_owner;
	struct net			*net = lo->lo_owner.so_client->net;
	struct nfsd_net			*nn = net_generic(net, nfsd_net_id);
	struct nfsd4_blocked_lock	*nbl = container_of(fl,
						struct nfsd4_blocked_lock, nbl_lock);
	bool queue = false;

	/* An empty list means that something else is going to be using it */
	spin_lock(&nn->blocked_locks_lock);
	if (!list_empty(&nbl->nbl_list)) {
		list_del_init(&nbl->nbl_list);
		list_del_init(&nbl->nbl_lru);
		queue = true;
	}
	spin_unlock(&nn->blocked_locks_lock);

	if (queue)
		nfsd4_run_cb(&nbl->nbl_cb);
}

static const struct lock_manager_operations nfsd_posix_mng_ops = {
	.lm_notify = nfsd4_lm_notify,
	.lm_get_owner = nfsd4_fl_get_owner,
	.lm_put_owner = nfsd4_fl_put_owner,
};

static void
nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny)
{
	struct nfs4_lockowner *lo;

	if (fl->fl_lmops == &nfsd_posix_mng_ops) {
		lo = (struct nfs4_lockowner *) fl->fl_owner;
		xdr_netobj_dup(&deny->ld_owner, &lo->lo_owner.so_owner,
						GFP_KERNEL);
		if (!deny->ld_owner.data)
			/* We just don't care that much */
			goto nevermind;
		deny->ld_clientid = lo->lo_owner.so_client->cl_clientid;
	} else {
nevermind:
		deny->ld_owner.len = 0;
		deny->ld_owner.data = NULL;
		deny->ld_clientid.cl_boot = 0;
		deny->ld_clientid.cl_id = 0;
	}
	deny->ld_start = fl->fl_start;
	deny->ld_length = NFS4_MAX_UINT64;
	if (fl->fl_end != NFS4_MAX_UINT64)
		deny->ld_length = fl->fl_end - fl->fl_start + 1;
	deny->ld_type = NFS4_READ_LT;
	if (fl->fl_type != F_RDLCK)
		deny->ld_type = NFS4_WRITE_LT;
}
static struct nfs4_lockowner *
find_lockowner_str_locked(struct nfs4_client *clp, struct xdr_netobj *owner)
{
	unsigned int strhashval = ownerstr_hashval(owner);
	struct nfs4_stateowner *so;

	lockdep_assert_held(&clp->cl_lock);

	list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[strhashval],
			    so_strhash) {
		if (so->so_is_open_owner)
			continue;
		if (same_owner_str(so, owner))
			return lockowner(nfs4_get_stateowner(so));
	}
	return NULL;
}

static struct nfs4_lockowner *
find_lockowner_str(struct nfs4_client *clp, struct xdr_netobj *owner)
{
	struct nfs4_lockowner *lo;

	spin_lock(&clp->cl_lock);
	lo = find_lockowner_str_locked(clp, owner);
	spin_unlock(&clp->cl_lock);
	return lo;
}

static void nfs4_unhash_lockowner(struct nfs4_stateowner *sop)
{
	unhash_lockowner_locked(lockowner(sop));
}

static void nfs4_free_lockowner(struct nfs4_stateowner *sop)
{
	struct nfs4_lockowner *lo = lockowner(sop);

	kmem_cache_free(lockowner_slab, lo);
}

static const struct nfs4_stateowner_operations lockowner_ops = {
	.so_unhash =	nfs4_unhash_lockowner,
	.so_free =	nfs4_free_lockowner,
};

/*
 * Alloc a lock owner structure.
 * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) has
 * occurred.
 *
 * strhashval = ownerstr_hashval
 */
static struct nfs4_lockowner *
alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp,
			   struct nfs4_ol_stateid *open_stp,
			   struct nfsd4_lock *lock)
{
	struct nfs4_lockowner *lo, *ret;

	lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp);
	if (!lo)
		return NULL;
	INIT_LIST_HEAD(&lo->lo_blocked);
	INIT_LIST_HEAD(&lo->lo_owner.so_stateids);
	lo->lo_owner.so_is_open_owner = 0;
	lo->lo_owner.so_seqid = lock->lk_new_lock_seqid;
	lo->lo_owner.so_ops = &lockowner_ops;
	spin_lock(&clp->cl_lock);
	ret = find_lockowner_str_locked(clp, &lock->lk_new_owner);
	if (ret == NULL) {
		list_add(&lo->lo_owner.so_strhash,
			 &clp->cl_ownerstr_hashtbl[strhashval]);
		ret = lo;
	} else
		nfs4_free_stateowner(&lo->lo_owner);

	spin_unlock(&clp->cl_lock);
	return ret;
}

static struct nfs4_ol_stateid *
find_lock_stateid(const struct nfs4_lockowner *lo,
		  const struct nfs4_ol_stateid *ost)
{
	struct nfs4_ol_stateid *lst;

	lockdep_assert_held(&ost->st_stid.sc_client->cl_lock);

	/* If ost is not hashed, ost->st_locks will not be valid */
	if (!nfs4_ol_stateid_unhashed(ost))
		list_for_each_entry(lst, &ost->st_locks, st_locks) {
			if (lst->st_stateowner == &lo->lo_owner) {
				refcount_inc(&lst->st_stid.sc_count);
				return lst;
			}
		}
	return NULL;
}
static struct nfs4_ol_stateid *
init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
		  struct nfs4_file *fp, struct inode *inode,
		  struct nfs4_ol_stateid *open_stp)
{
	struct nfs4_client *clp = lo->lo_owner.so_client;
	struct nfs4_ol_stateid *retstp;

	mutex_init(&stp->st_mutex);
	mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);
retry:
	spin_lock(&clp->cl_lock);
	if (nfs4_ol_stateid_unhashed(open_stp))
		goto out_close;
	retstp = find_lock_stateid(lo, open_stp);
	if (retstp)
		goto out_found;
	refcount_inc(&stp->st_stid.sc_count);
	stp->st_stid.sc_type = NFS4_LOCK_STID;
	stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner);
	get_nfs4_file(fp);
	stp->st_stid.sc_file = fp;
	stp->st_access_bmap = 0;
	stp->st_deny_bmap = open_stp->st_deny_bmap;
	stp->st_openstp = open_stp;
	spin_lock(&fp->fi_lock);
	list_add(&stp->st_locks, &open_stp->st_locks);
	list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
	list_add(&stp->st_perfile, &fp->fi_stateids);
	spin_unlock(&fp->fi_lock);
	spin_unlock(&clp->cl_lock);
	return stp;
out_found:
	spin_unlock(&clp->cl_lock);
	if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
		nfs4_put_stid(&retstp->st_stid);
		goto retry;
	}
	/* To keep mutex tracking happy */
	mutex_unlock(&stp->st_mutex);
	return retstp;
out_close:
	spin_unlock(&clp->cl_lock);
	mutex_unlock(&stp->st_mutex);
	return NULL;
}

static struct nfs4_ol_stateid *
find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
			    struct inode *inode, struct nfs4_ol_stateid *ost,
			    bool *new)
{
	struct nfs4_stid *ns = NULL;
	struct nfs4_ol_stateid *lst;
	struct nfs4_openowner *oo = openowner(ost->st_stateowner);
	struct nfs4_client *clp = oo->oo_owner.so_client;

	*new = false;
	spin_lock(&clp->cl_lock);
	lst = find_lock_stateid(lo, ost);
	spin_unlock(&clp->cl_lock);
	if (lst != NULL) {
		if (nfsd4_lock_ol_stateid(lst) == nfs_ok)
			goto out;
		nfs4_put_stid(&lst->st_stid);
	}
	ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid);
	if (ns == NULL)
		return NULL;

	lst = init_lock_stateid(openlockstateid(ns), lo, fi, inode, ost);
	if (lst == openlockstateid(ns))
		*new = true;
	else
		nfs4_put_stid(ns);
out:
	return lst;
}

static int
check_lock_length(u64 offset, u64 length)
{
	return ((length == 0) || ((length != NFS4_MAX_UINT64) &&
		(length > ~offset)));
}
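/*
 * The length > ~offset test above is the unsigned overflow check:
 * offset + length wraps past 2^64 exactly when length > 2^64 - 1 - offset,
 * i.e. ~offset. A zero length is always invalid, and the special
 * NFS4_MAX_UINT64 "lock to EOF" length is exempt from the overflow check.
 */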
6602 static void get_lock_access(struct nfs4_ol_stateid
*lock_stp
, u32 access
)
6604 struct nfs4_file
*fp
= lock_stp
->st_stid
.sc_file
;
6606 lockdep_assert_held(&fp
->fi_lock
);
6608 if (test_access(access
, lock_stp
))
6610 __nfs4_file_get_access(fp
, access
);
6611 set_access(access
, lock_stp
);
static __be32
lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
			    struct nfs4_ol_stateid *ost,
			    struct nfsd4_lock *lock,
			    struct nfs4_ol_stateid **plst, bool *new)
{
	__be32 status;
	struct nfs4_file *fi = ost->st_stid.sc_file;
	struct nfs4_openowner *oo = openowner(ost->st_stateowner);
	struct nfs4_client *cl = oo->oo_owner.so_client;
	struct inode *inode = d_inode(cstate->current_fh.fh_dentry);
	struct nfs4_lockowner *lo;
	struct nfs4_ol_stateid *lst;
	unsigned int strhashval;

	lo = find_lockowner_str(cl, &lock->lk_new_owner);
	if (!lo) {
		strhashval = ownerstr_hashval(&lock->lk_new_owner);
		lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock);
		if (lo == NULL)
			return nfserr_jukebox;
	} else {
		/* with an existing lockowner, seqids must be the same */
		status = nfserr_bad_seqid;
		if (!cstate->minorversion &&
		    lock->lk_new_lock_seqid != lo->lo_owner.so_seqid)
			goto out;
	}

	lst = find_or_create_lock_stateid(lo, fi, inode, ost, new);
	if (lst == NULL) {
		status = nfserr_jukebox;
		goto out;
	}

	status = nfs_ok;
	*plst = lst;
out:
	nfs4_put_stateowner(&lo->lo_owner);
	return status;
}
/*
 *  LOCK operation
 */
__be32
nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	   union nfsd4_op_u *u)
{
	struct nfsd4_lock *lock = &u->lock;
	struct nfs4_openowner *open_sop = NULL;
	struct nfs4_lockowner *lock_sop = NULL;
	struct nfs4_ol_stateid *lock_stp = NULL;
	struct nfs4_ol_stateid *open_stp = NULL;
	struct nfs4_file *fp;
	struct nfsd_file *nf = NULL;
	struct nfsd4_blocked_lock *nbl = NULL;
	struct file_lock *file_lock = NULL;
	struct file_lock *conflock = NULL;
	__be32 status = 0;
	int lkflg;
	int err;
	bool new = false;
	unsigned char fl_type;
	unsigned int fl_flags = FL_POSIX;
	struct net *net = SVC_NET(rqstp);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
		(long long) lock->lk_offset,
		(long long) lock->lk_length);

	if (check_lock_length(lock->lk_offset, lock->lk_length))
		return nfserr_inval;

	if ((status = fh_verify(rqstp, &cstate->current_fh,
				S_IFREG, NFSD_MAY_LOCK))) {
		dprintk("NFSD: nfsd4_lock: permission denied!\n");
		return status;
	}

	if (lock->lk_is_new) {
		if (nfsd4_has_session(cstate))
			/* See rfc 5661 18.10.3: given clientid is ignored: */
			memcpy(&lock->lk_new_clientid,
				&cstate->session->se_client->cl_clientid,
				sizeof(clientid_t));

		status = nfserr_stale_clientid;
		if (STALE_CLIENTID(&lock->lk_new_clientid, nn))
			goto out;

		/* validate and update open stateid and open seqid */
		status = nfs4_preprocess_confirmed_seqid_op(cstate,
					lock->lk_new_open_seqid,
					&lock->lk_new_open_stateid,
					&open_stp, nn);
		if (status)
			goto out;
		mutex_unlock(&open_stp->st_mutex);
		open_sop = openowner(open_stp->st_stateowner);
		status = nfserr_bad_stateid;
		if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
						&lock->lk_new_clientid))
			goto out;
		status = lookup_or_create_lock_state(cstate, open_stp, lock,
						     &lock_stp, &new);
	} else {
		status = nfs4_preprocess_seqid_op(cstate,
					lock->lk_old_lock_seqid,
					&lock->lk_old_lock_stateid,
					NFS4_LOCK_STID, &lock_stp, nn);
	}
	if (status)
		goto out;
	lock_sop = lockowner(lock_stp->st_stateowner);

	lkflg = setlkflg(lock->lk_type);
	status = nfs4_check_openmode(lock_stp, lkflg);
	if (status)
		goto out;

	status = nfserr_grace;
	if (locks_in_grace(net) && !lock->lk_reclaim)
		goto out;
	status = nfserr_no_grace;
	if (!locks_in_grace(net) && lock->lk_reclaim)
		goto out;

	fp = lock_stp->st_stid.sc_file;
	switch (lock->lk_type) {
		case NFS4_READW_LT:
			if (nfsd4_has_session(cstate))
				fl_flags |= FL_SLEEP;
			fallthrough;
		case NFS4_READ_LT:
			spin_lock(&fp->fi_lock);
			nf = find_readable_file_locked(fp);
			if (nf)
				get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ);
			spin_unlock(&fp->fi_lock);
			fl_type = F_RDLCK;
			break;
		case NFS4_WRITEW_LT:
			if (nfsd4_has_session(cstate))
				fl_flags |= FL_SLEEP;
			fallthrough;
		case NFS4_WRITE_LT:
			spin_lock(&fp->fi_lock);
			nf = find_writeable_file_locked(fp);
			if (nf)
				get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE);
			spin_unlock(&fp->fi_lock);
			fl_type = F_WRLCK;
			break;
		default:
			status = nfserr_inval;
		goto out;
	}

	if (!nf) {
		status = nfserr_openmode;
		goto out;
	}

	nbl = find_or_allocate_block(lock_sop, &fp->fi_fhandle, nn);
	if (!nbl) {
		dprintk("NFSD: %s: unable to allocate block!\n", __func__);
		status = nfserr_jukebox;
		goto out;
	}

	file_lock = &nbl->nbl_lock;
	file_lock->fl_type = fl_type;
	file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(&lock_sop->lo_owner));
	file_lock->fl_pid = current->tgid;
	file_lock->fl_file = nf->nf_file;
	file_lock->fl_flags = fl_flags;
	file_lock->fl_lmops = &nfsd_posix_mng_ops;
	file_lock->fl_start = lock->lk_offset;
	file_lock->fl_end = last_byte_offset(lock->lk_offset, lock->lk_length);
	nfs4_transform_lock_offset(file_lock);

	conflock = locks_alloc_lock();
	if (!conflock) {
		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
		status = nfserr_jukebox;
		goto out;
	}

	if (fl_flags & FL_SLEEP) {
		nbl->nbl_time = ktime_get_boottime_seconds();
		spin_lock(&nn->blocked_locks_lock);
		list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked);
		list_add_tail(&nbl->nbl_lru, &nn->blocked_locks_lru);
		spin_unlock(&nn->blocked_locks_lock);
	}

	err = vfs_lock_file(nf->nf_file, F_SETLK, file_lock, conflock);
	switch (err) {
	case 0: /* success! */
		nfs4_inc_and_copy_stateid(&lock->lk_resp_stateid, &lock_stp->st_stid);
		status = 0;
		if (lock->lk_reclaim)
			nn->somebody_reclaimed = true;
		break;
	case FILE_LOCK_DEFERRED:
		nbl = NULL;
		fallthrough;
	case -EAGAIN:		/* conflock holds conflicting lock */
		status = nfserr_denied;
		dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
		nfs4_set_lock_denied(conflock, &lock->lk_denied);
		break;
	case -EDEADLK:
		status = nfserr_deadlock;
		break;
	default:
		dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n", err);
		status = nfserrno(err);
		break;
	}
out:
	if (nbl) {
		/* dequeue it if we queued it before */
		if (fl_flags & FL_SLEEP) {
			spin_lock(&nn->blocked_locks_lock);
			list_del_init(&nbl->nbl_list);
			list_del_init(&nbl->nbl_lru);
			spin_unlock(&nn->blocked_locks_lock);
		}
		free_blocked_lock(nbl);
	}
	if (nf)
		nfsd_file_put(nf);
	if (lock_stp) {
		/* Bump seqid manually if the 4.0 replay owner is openowner */
		if (cstate->replay_owner &&
		    cstate->replay_owner != &lock_sop->lo_owner &&
		    seqid_mutating_err(ntohl(status)))
			lock_sop->lo_owner.so_seqid++;

		/*
		 * If this is a new, never-before-used stateid, and we are
		 * returning an error, then just go ahead and release it.
		 */
		if (status && new)
			release_lock_stateid(lock_stp);

		mutex_unlock(&lock_stp->st_mutex);

		nfs4_put_stid(&lock_stp->st_stid);
	}
	if (open_stp)
		nfs4_put_stid(&open_stp->st_stid);
	nfsd4_bump_seqid(cstate, status);
	if (conflock)
		locks_free_lock(conflock);
	return status;
}
/*
 * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
 * so we do a temporary open here just to get an open file to pass to
 * vfs_test_lock.  (Arguably perhaps test_lock should be done with an
 * inode operation.)
 */
static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
{
	struct nfsd_file *nf;
	__be32 err = nfsd_file_acquire(rqstp, fhp, NFSD_MAY_READ, &nf);
	if (!err) {
		err = nfserrno(vfs_test_lock(nf->nf_file, lock));
		nfsd_file_put(nf);
	}
	return err;
}
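
/*
 * Illustrative note (not in the original source): a client may probe for
 * conflicts with a compound as simple as
 *
 *	PUTFH(fh); LOCKT(WRITE_LT, offset, length)
 *
 * without ever having sent an OPEN, so nfsd4_lockt() below has no open
 * stateid through which to reach a struct file.  nfsd_test_lock() above
 * therefore acquires a read-only nfsd_file just long enough to call
 * vfs_test_lock() and then drops it again.
 */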
/*
 * LOCKT operation
 */
__be32
nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	    union nfsd4_op_u *u)
{
	struct nfsd4_lockt *lockt = &u->lockt;
	struct file_lock *file_lock = NULL;
	struct nfs4_lockowner *lo = NULL;
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	if (locks_in_grace(SVC_NET(rqstp)))
		return nfserr_grace;

	if (check_lock_length(lockt->lt_offset, lockt->lt_length))
		return nfserr_inval;

	if (!nfsd4_has_session(cstate)) {
		status = lookup_clientid(&lockt->lt_clientid, cstate, nn,
					 false);
		if (status)
			goto out;
	}

	if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
		goto out;

	file_lock = locks_alloc_lock();
	if (!file_lock) {
		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
		status = nfserr_jukebox;
		goto out;
	}

	switch (lockt->lt_type) {
		case NFS4_READ_LT:
		case NFS4_READW_LT:
			file_lock->fl_type = F_RDLCK;
			break;
		case NFS4_WRITE_LT:
		case NFS4_WRITEW_LT:
			file_lock->fl_type = F_WRLCK;
			break;
		default:
			dprintk("NFSD: nfs4_lockt: bad lock type!\n");
			status = nfserr_inval;
			goto out;
	}

	lo = find_lockowner_str(cstate->clp, &lockt->lt_owner);
	if (lo)
		file_lock->fl_owner = (fl_owner_t)lo;
	file_lock->fl_pid = current->tgid;
	file_lock->fl_flags = FL_POSIX;

	file_lock->fl_start = lockt->lt_offset;
	file_lock->fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length);

	nfs4_transform_lock_offset(file_lock);

	status = nfsd_test_lock(rqstp, &cstate->current_fh, file_lock);
	if (status)
		goto out;

	if (file_lock->fl_type != F_UNLCK) {
		status = nfserr_denied;
		nfs4_set_lock_denied(file_lock, &lockt->lt_denied);
	}
out:
	if (lo)
		nfs4_put_stateowner(&lo->lo_owner);
	if (file_lock)
		locks_free_lock(file_lock);
	return status;
}
__be32
nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	    union nfsd4_op_u *u)
{
	struct nfsd4_locku *locku = &u->locku;
	struct nfs4_ol_stateid *stp;
	struct nfsd_file *nf = NULL;
	struct file_lock *file_lock = NULL;
	__be32 status;
	int err;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	dprintk("NFSD: nfsd4_locku: start=%Ld length=%Ld\n",
		(long long) locku->lu_offset,
		(long long) locku->lu_length);

	if (check_lock_length(locku->lu_offset, locku->lu_length))
		return nfserr_inval;

	status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid,
					&locku->lu_stateid, NFS4_LOCK_STID,
					&stp, nn);
	if (status)
		goto out;
	nf = find_any_file(stp->st_stid.sc_file);
	if (!nf) {
		status = nfserr_lock_range;
		goto put_stateid;
	}
	file_lock = locks_alloc_lock();
	if (!file_lock) {
		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
		status = nfserr_jukebox;
		goto put_file;
	}

	file_lock->fl_type = F_UNLCK;
	file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(stp->st_stateowner));
	file_lock->fl_pid = current->tgid;
	file_lock->fl_file = nf->nf_file;
	file_lock->fl_flags = FL_POSIX;
	file_lock->fl_lmops = &nfsd_posix_mng_ops;
	file_lock->fl_start = locku->lu_offset;

	file_lock->fl_end = last_byte_offset(locku->lu_offset,
						locku->lu_length);
	nfs4_transform_lock_offset(file_lock);

	err = vfs_lock_file(nf->nf_file, F_SETLK, file_lock, NULL);
	if (err) {
		dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n");
		goto out_nfserr;
	}
	nfs4_inc_and_copy_stateid(&locku->lu_stateid, &stp->st_stid);
put_file:
	nfsd_file_put(nf);
put_stateid:
	mutex_unlock(&stp->st_mutex);
	nfs4_put_stid(&stp->st_stid);
out:
	nfsd4_bump_seqid(cstate, status);
	if (file_lock)
		locks_free_lock(file_lock);
	return status;

out_nfserr:
	status = nfserrno(err);
	goto put_file;
}
/*
 * returns
 *	true:  locks held by lockowner
 *	false: no locks held by lockowner
 */
static bool
check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
{
	struct file_lock *fl;
	int status = false;
	struct nfsd_file *nf = find_any_file(fp);
	struct inode *inode;
	struct file_lock_context *flctx;

	if (!nf) {
		/* Any valid lock stateid should have some sort of access */
		WARN_ON_ONCE(1);
		return status;
	}

	inode = locks_inode(nf->nf_file);
	flctx = inode->i_flctx;

	if (flctx && !list_empty_careful(&flctx->flc_posix)) {
		spin_lock(&flctx->flc_lock);
		list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
			if (fl->fl_owner == (fl_owner_t)lowner) {
				status = true;
				break;
			}
		}
		spin_unlock(&flctx->flc_lock);
	}
	nfsd_file_put(nf);
	return status;
}
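
/*
 * Illustrative note (not in the original source): the fl_owner comparison
 * above works because nfsd4_lock() and nfsd4_locku() store the
 * nfs4_lockowner pointer itself (cast to fl_owner_t) in
 * file_lock->fl_owner, so simple pointer equality identifies every
 * POSIX lock on the inode that belongs to this lockowner.
 */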
__be32
nfsd4_release_lockowner(struct svc_rqst *rqstp,
			struct nfsd4_compound_state *cstate,
			union nfsd4_op_u *u)
{
	struct nfsd4_release_lockowner *rlockowner = &u->release_lockowner;
	clientid_t *clid = &rlockowner->rl_clientid;
	struct nfs4_stateowner *sop;
	struct nfs4_lockowner *lo = NULL;
	struct nfs4_ol_stateid *stp;
	struct xdr_netobj *owner = &rlockowner->rl_owner;
	unsigned int hashval = ownerstr_hashval(owner);
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
	struct nfs4_client *clp;
	LIST_HEAD (reaplist);

	dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
		clid->cl_boot, clid->cl_id);

	status = lookup_clientid(clid, cstate, nn, false);
	if (status)
		return status;

	clp = cstate->clp;
	/* Find the matching lock stateowner */
	spin_lock(&clp->cl_lock);
	list_for_each_entry(sop, &clp->cl_ownerstr_hashtbl[hashval],
			    so_strhash) {

		if (sop->so_is_open_owner || !same_owner_str(sop, owner))
			continue;

		/* see if there are still any locks associated with it */
		lo = lockowner(sop);
		list_for_each_entry(stp, &sop->so_stateids, st_perstateowner) {
			if (check_for_locks(stp->st_stid.sc_file, lo)) {
				status = nfserr_locks_held;
				spin_unlock(&clp->cl_lock);
				return status;
			}
		}

		nfs4_get_stateowner(sop);
		break;
	}
	if (!lo) {
		spin_unlock(&clp->cl_lock);
		return status;
	}

	unhash_lockowner_locked(lo);
	while (!list_empty(&lo->lo_owner.so_stateids)) {
		stp = list_first_entry(&lo->lo_owner.so_stateids,
				       struct nfs4_ol_stateid,
				       st_perstateowner);
		WARN_ON(!unhash_lock_stateid(stp));
		put_ol_stateid_locked(stp, &reaplist);
	}
	spin_unlock(&clp->cl_lock);
	free_ol_stateid_reaplist(&reaplist);
	remove_blocked_locks(lo);
	nfs4_put_stateowner(&lo->lo_owner);

	return status;
}
static inline struct nfs4_client_reclaim *
alloc_reclaim(void)
{
	return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL);
}

bool
nfs4_has_reclaimed_state(struct xdr_netobj name, struct nfsd_net *nn)
{
	struct nfs4_client_reclaim *crp;

	crp = nfsd4_find_reclaim_client(name, nn);
	return (crp && crp->cr_clp);
}
/*
 * failure => all reset bets are off, nfserr_no_grace...
 *
 * The caller is responsible for freeing name.data if NULL is returned (it
 * will be freed in nfs4_remove_reclaim_record in the normal case).
 */
struct nfs4_client_reclaim *
nfs4_client_to_reclaim(struct xdr_netobj name, struct xdr_netobj princhash,
		struct nfsd_net *nn)
{
	unsigned int strhashval;
	struct nfs4_client_reclaim *crp;

	trace_nfsd_clid_reclaim(nn, name.len, name.data);
	crp = alloc_reclaim();
	if (crp) {
		strhashval = clientstr_hashval(name);
		INIT_LIST_HEAD(&crp->cr_strhash);
		list_add(&crp->cr_strhash, &nn->reclaim_str_hashtbl[strhashval]);
		crp->cr_name.data = name.data;
		crp->cr_name.len = name.len;
		crp->cr_princhash.data = princhash.data;
		crp->cr_princhash.len = princhash.len;
		crp->cr_clp = NULL;
		nn->reclaim_str_hashtbl_size++;
	}
	return crp;
}
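
/*
 * Illustrative sketch (not in the original source): as the comment above
 * says, ownership of name.data transfers to the reclaim record only on
 * success, so a caller would look roughly like:
 *
 *	crp = nfs4_client_to_reclaim(name, princhash, nn);
 *	if (!crp)
 *		kfree(name.data);	// still the caller's on failure
 *	// on success it is freed later by nfs4_remove_reclaim_record()
 */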
void
nfs4_remove_reclaim_record(struct nfs4_client_reclaim *crp, struct nfsd_net *nn)
{
	list_del(&crp->cr_strhash);
	kfree(crp->cr_name.data);
	kfree(crp->cr_princhash.data);
	kfree(crp);
	nn->reclaim_str_hashtbl_size--;
}
void
nfs4_release_reclaim(struct nfsd_net *nn)
{
	struct nfs4_client_reclaim *crp = NULL;
	int i;

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		while (!list_empty(&nn->reclaim_str_hashtbl[i])) {
			crp = list_entry(nn->reclaim_str_hashtbl[i].next,
					struct nfs4_client_reclaim, cr_strhash);
			nfs4_remove_reclaim_record(crp, nn);
		}
	}
	WARN_ON_ONCE(nn->reclaim_str_hashtbl_size);
}
/*
 * called from OPEN, CLAIM_PREVIOUS with a new clientid. */
struct nfs4_client_reclaim *
nfsd4_find_reclaim_client(struct xdr_netobj name, struct nfsd_net *nn)
{
	unsigned int strhashval;
	struct nfs4_client_reclaim *crp = NULL;

	trace_nfsd_clid_find(nn, name.len, name.data);

	strhashval = clientstr_hashval(name);
	list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) {
		if (compare_blob(&crp->cr_name, &name) == 0) {
			return crp;
		}
	}
	return NULL;
}
/*
 * Called from OPEN. Look for clientid in reclaim list.
 */
__be32
nfs4_check_open_reclaim(clientid_t *clid,
		struct nfsd4_compound_state *cstate,
		struct nfsd_net *nn)
{
	__be32 status;

	/* find clientid in conf_id_hashtbl */
	status = lookup_clientid(clid, cstate, nn, false);
	if (status)
		return nfserr_reclaim_bad;

	if (test_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &cstate->clp->cl_flags))
		return nfserr_no_grace;

	if (nfsd4_client_record_check(cstate->clp))
		return nfserr_reclaim_bad;

	return nfs_ok;
}
/*
 * Since the lifetime of a delegation isn't limited to that of an open, a
 * client may quite reasonably hang on to a delegation as long as it has
 * the inode cached.  This becomes an obvious problem the first time a
 * client's inode cache approaches the size of the server's total memory.
 *
 * For now we avoid this problem by imposing a hard limit on the number
 * of delegations, which varies according to the server's memory size.
 */
static void
set_max_delegations(void)
{
	/*
	 * Allow at most 4 delegations per megabyte of RAM.  Quick
	 * estimates suggest that in the worst case (where every delegation
	 * is for a different inode), a delegation could take about 1.5K,
	 * giving a worst case usage of about 6% of memory.
	 */
	max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT);
}
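
/*
 * Illustrative arithmetic (not in the original source): with 4K pages,
 * PAGE_SHIFT is 12, so the shift above is 20 - 2 - 12 = 6 and
 * max_delegations is pages / 64.  A megabyte is 256 such pages, and
 * 256 / 64 = 4, giving the "4 delegations per megabyte" from the
 * comment above; at ~1.5K per delegation that caps delegation state
 * at roughly 6K per megabyte of RAM.
 */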
static int nfs4_state_create_net(struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	int i;

	nn->conf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE,
					    sizeof(struct list_head),
					    GFP_KERNEL);
	if (!nn->conf_id_hashtbl)
		goto err;
	nn->unconf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE,
					      sizeof(struct list_head),
					      GFP_KERNEL);
	if (!nn->unconf_id_hashtbl)
		goto err_unconf_id;
	nn->sessionid_hashtbl = kmalloc_array(SESSION_HASH_SIZE,
					      sizeof(struct list_head),
					      GFP_KERNEL);
	if (!nn->sessionid_hashtbl)
		goto err_sessionid;

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]);
		INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]);
	}
	for (i = 0; i < SESSION_HASH_SIZE; i++)
		INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]);
	nn->conf_name_tree = RB_ROOT;
	nn->unconf_name_tree = RB_ROOT;
	nn->boot_time = ktime_get_real_seconds();
	nn->grace_ended = false;
	nn->nfsd4_manager.block_opens = true;
	INIT_LIST_HEAD(&nn->nfsd4_manager.list);
	INIT_LIST_HEAD(&nn->client_lru);
	INIT_LIST_HEAD(&nn->close_lru);
	INIT_LIST_HEAD(&nn->del_recall_lru);
	spin_lock_init(&nn->client_lock);
	spin_lock_init(&nn->s2s_cp_lock);
	idr_init(&nn->s2s_cp_stateids);

	spin_lock_init(&nn->blocked_locks_lock);
	INIT_LIST_HEAD(&nn->blocked_locks_lru);

	INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main);
	get_net(net);

	return 0;

err_sessionid:
	kfree(nn->unconf_id_hashtbl);
err_unconf_id:
	kfree(nn->conf_id_hashtbl);
err:
	return -ENOMEM;
}
static void
nfs4_state_destroy_net(struct net *net)
{
	int i;
	struct nfs4_client *clp = NULL;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		while (!list_empty(&nn->conf_id_hashtbl[i])) {
			clp = list_entry(nn->conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
			destroy_client(clp);
		}
	}

	WARN_ON(!list_empty(&nn->blocked_locks_lru));

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		while (!list_empty(&nn->unconf_id_hashtbl[i])) {
			clp = list_entry(nn->unconf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
			destroy_client(clp);
		}
	}

	kfree(nn->sessionid_hashtbl);
	kfree(nn->unconf_id_hashtbl);
	kfree(nn->conf_id_hashtbl);
	put_net(net);
}
int
nfs4_state_start_net(struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	int ret;

	ret = get_nfsdfs(net);
	if (ret)
		return ret;
	ret = nfs4_state_create_net(net);
	if (ret) {
		mntput(nn->nfsd_mnt);
		return ret;
	}
	locks_start_grace(net, &nn->nfsd4_manager);
	nfsd4_client_tracking_init(net);
	if (nn->track_reclaim_completes && nn->reclaim_str_hashtbl_size == 0)
		goto skip_grace;
	printk(KERN_INFO "NFSD: starting %lld-second grace period (net %x)\n",
	       nn->nfsd4_grace, net->ns.inum);
	trace_nfsd_grace_start(nn);
	queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ);
	return 0;

skip_grace:
	printk(KERN_INFO "NFSD: no clients to reclaim, skipping NFSv4 grace period (net %x)\n",
			net->ns.inum);
	queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_lease * HZ);
	nfsd4_end_grace(nn);
	return 0;
}
/* initialization to perform when the nfsd service is started: */

int
nfs4_state_start(void)
{
	int ret;

	laundry_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "nfsd4");
	if (laundry_wq == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	ret = nfsd4_create_callback_queue();
	if (ret)
		goto out_free_laundry;

	set_max_delegations();
	return 0;

out_free_laundry:
	destroy_workqueue(laundry_wq);
out:
	return ret;
}
void
nfs4_state_shutdown_net(struct net *net)
{
	struct nfs4_delegation *dp = NULL;
	struct list_head *pos, *next, reaplist;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	cancel_delayed_work_sync(&nn->laundromat_work);
	locks_end_grace(&nn->nfsd4_manager);

	INIT_LIST_HEAD(&reaplist);
	spin_lock(&state_lock);
	list_for_each_safe(pos, next, &nn->del_recall_lru) {
		dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
		WARN_ON(!unhash_delegation_locked(dp));
		list_add(&dp->dl_recall_lru, &reaplist);
	}
	spin_unlock(&state_lock);
	list_for_each_safe(pos, next, &reaplist) {
		dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
		list_del_init(&dp->dl_recall_lru);
		destroy_unhashed_deleg(dp);
	}

	nfsd4_client_tracking_exit(net);
	nfs4_state_destroy_net(net);
	mntput(nn->nfsd_mnt);
}
void
nfs4_state_shutdown(void)
{
	destroy_workqueue(laundry_wq);
	nfsd4_destroy_callback_queue();
}
static void
get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
{
	if (HAS_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG) &&
	    CURRENT_STATEID(stateid))
		memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t));
}

static void
put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
{
	if (cstate->minorversion) {
		memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t));
		SET_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG);
	}
}

void
clear_current_stateid(struct nfsd4_compound_state *cstate)
{
	CLEAR_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG);
}
/*
 * functions to set current state id
 */
void
nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	put_stateid(cstate, &u->open_downgrade.od_stateid);
}

void
nfsd4_set_openstateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	put_stateid(cstate, &u->open.op_stateid);
}

void
nfsd4_set_closestateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	put_stateid(cstate, &u->close.cl_stateid);
}

void
nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	put_stateid(cstate, &u->lock.lk_resp_stateid);
}
/*
 * functions to consume current state id
 */

void
nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->open_downgrade.od_stateid);
}

void
nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->delegreturn.dr_stateid);
}

void
nfsd4_get_freestateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->free_stateid.fr_stateid);
}

void
nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->setattr.sa_stateid);
}

void
nfsd4_get_closestateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->close.cl_stateid);
}

void
nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->locku.lu_stateid);
}

void
nfsd4_get_readstateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->read.rd_stateid);
}

void
nfsd4_get_writestateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->write.wr_stateid);
}
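
/*
 * Illustrative note (not in the original source): sketch of how the
 * helpers above cooperate within one NFSv4.1 compound.  An op that
 * produces a stateid invokes its nfsd4_set_*stateid() hook, which saves
 * the value in cstate->current_stateid via put_stateid(); a later op
 * sent with the special "current" stateid has it replaced by
 * get_stateid() before processing:
 *
 *	OPEN                   -> put_stateid() saves op_stateid
 *	READ (current stateid) -> get_stateid() substitutes the saved value
 *
 * put_stateid() is a no-op when cstate->minorversion == 0, so this
 * machinery only engages for sessions-based (v4.1+) compounds.
 */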