/*
 *  Copyright (c) 2001 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Kendrick Smith <kmsmith@umich.edu>
 *  Andy Adamson <kandros@umich.edu>
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. Neither the name of the University nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/ratelimit.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/addr.h>
#include <linux/jhash.h>
#include "xdr4.h"
#include "xdr4cb.h"
#include "vfs.h"
#include "current_stateid.h"

#include "netns.h"
#include "pnfs.h"
#define NFSDDBG_FACILITY                NFSDDBG_PROC

#define all_ones {{~0,~0},~0}
static const stateid_t one_stateid = {
	.si_generation = ~0,
	.si_opaque = all_ones,
};
static const stateid_t zero_stateid = {
	/* all fields zeroed in static initialization */
};
static const stateid_t currentstateid = {
	.si_generation = 1,
};
static const stateid_t close_stateid = {
	.si_generation = 0xffffffffU,
};

static u64 current_sessionid = 1;

#define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
#define ONE_STATEID(stateid) (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
#define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
#define CLOSE_STATEID(stateid)  (!memcmp((stateid), &close_stateid, sizeof(stateid_t)))
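
/*
 * Note: zero_stateid and one_stateid correspond to the special all-zeros
 * and all-ones stateids defined by the NFSv4 protocol (RFC 7530/5661);
 * currentstateid and close_stateid appear to be internal sentinels used
 * by the current-stateid and CLOSE handling in this file.
 */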
/* forward declarations */
static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
static void nfs4_free_ol_stateid(struct nfs4_stid *stid);

/*
 * Currently used for the del_recall_lru and file hash table.  In an
 * effort to decrease the scope of the client_mutex, this spinlock may
 * eventually cover more:
 */
static DEFINE_SPINLOCK(state_lock);

enum nfsd4_st_mutex_lock_subclass {
	OPEN_STATEID_MUTEX = 0,
	LOCK_STATEID_MUTEX = 1,
};

/*
 * A waitqueue for all in-progress 4.0 CLOSE operations that are waiting for
 * the refcount on the open stateid to drop.
 */
static DECLARE_WAIT_QUEUE_HEAD(close_wq);

static struct kmem_cache *client_slab;
static struct kmem_cache *openowner_slab;
static struct kmem_cache *lockowner_slab;
static struct kmem_cache *file_slab;
static struct kmem_cache *stateid_slab;
static struct kmem_cache *deleg_slab;
static struct kmem_cache *odstate_slab;

static void free_session(struct nfsd4_session *);

static const struct nfsd4_callback_ops nfsd4_cb_recall_ops;
static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops;
static bool is_session_dead(struct nfsd4_session *ses)
{
	return ses->se_flags & NFS4_SESSION_DEAD;
}

static __be32 mark_session_dead_locked(struct nfsd4_session *ses, int ref_held_by_me)
{
	if (atomic_read(&ses->se_ref) > ref_held_by_me)
		return nfserr_jukebox;
	ses->se_flags |= NFS4_SESSION_DEAD;
	return nfs_ok;
}

static bool is_client_expired(struct nfs4_client *clp)
{
	return clp->cl_time == 0;
}

static __be32 get_client_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	if (is_client_expired(clp))
		return nfserr_expired;
	atomic_inc(&clp->cl_refcount);
	return nfs_ok;
}
/* must be called under the client_lock */
static inline void
renew_client_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	if (is_client_expired(clp)) {
		WARN_ON(1);
		printk("%s: client (clientid %08x/%08x) already expired\n",
			__func__,
			clp->cl_clientid.cl_boot,
			clp->cl_clientid.cl_id);
		return;
	}

	dprintk("renewing client (clientid %08x/%08x)\n",
			clp->cl_clientid.cl_boot,
			clp->cl_clientid.cl_id);
	list_move_tail(&clp->cl_lru, &nn->client_lru);
	clp->cl_time = get_seconds();
}

static void put_client_renew_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	if (!atomic_dec_and_test(&clp->cl_refcount))
		return;
	if (!is_client_expired(clp))
		renew_client_locked(clp);
}

static void put_client_renew(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	if (!atomic_dec_and_lock(&clp->cl_refcount, &nn->client_lock))
		return;
	if (!is_client_expired(clp))
		renew_client_locked(clp);
	spin_unlock(&nn->client_lock);
}
static __be32 nfsd4_get_session_locked(struct nfsd4_session *ses)
{
	__be32 status;

	if (is_session_dead(ses))
		return nfserr_badsession;
	status = get_client_locked(ses->se_client);
	if (status)
		return status;
	atomic_inc(&ses->se_ref);
	return nfs_ok;
}

static void nfsd4_put_session_locked(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	if (atomic_dec_and_test(&ses->se_ref) && is_session_dead(ses))
		free_session(ses);
	put_client_renew_locked(clp);
}

static void nfsd4_put_session(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	spin_lock(&nn->client_lock);
	nfsd4_put_session_locked(ses);
	spin_unlock(&nn->client_lock);
}
static struct nfsd4_blocked_lock *
find_blocked_lock(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
			struct nfsd_net *nn)
{
	struct nfsd4_blocked_lock *cur, *found = NULL;

	spin_lock(&nn->blocked_locks_lock);
	list_for_each_entry(cur, &lo->lo_blocked, nbl_list) {
		if (fh_match(fh, &cur->nbl_fh)) {
			list_del_init(&cur->nbl_list);
			list_del_init(&cur->nbl_lru);
			found = cur;
			break;
		}
	}
	spin_unlock(&nn->blocked_locks_lock);
	if (found)
		posix_unblock_lock(&found->nbl_lock);
	return found;
}

static struct nfsd4_blocked_lock *
find_or_allocate_block(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
			struct nfsd_net *nn)
{
	struct nfsd4_blocked_lock *nbl;

	nbl = find_blocked_lock(lo, fh, nn);
	if (!nbl) {
		nbl = kmalloc(sizeof(*nbl), GFP_KERNEL);
		if (nbl) {
			fh_copy_shallow(&nbl->nbl_fh, fh);
			locks_init_lock(&nbl->nbl_lock);
			nfsd4_init_cb(&nbl->nbl_cb, lo->lo_owner.so_client,
					&nfsd4_cb_notify_lock_ops,
					NFSPROC4_CLNT_CB_NOTIFY_LOCK);
		}
	}
	return nbl;
}

static void
free_blocked_lock(struct nfsd4_blocked_lock *nbl)
{
	locks_release_private(&nbl->nbl_lock);
	kfree(nbl);
}

static void
remove_blocked_locks(struct nfs4_lockowner *lo)
{
	struct nfs4_client *clp = lo->lo_owner.so_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
	struct nfsd4_blocked_lock *nbl;
	LIST_HEAD(reaplist);

	/* Dequeue all blocked locks */
	spin_lock(&nn->blocked_locks_lock);
	while (!list_empty(&lo->lo_blocked)) {
		nbl = list_first_entry(&lo->lo_blocked,
					struct nfsd4_blocked_lock,
					nbl_list);
		list_del_init(&nbl->nbl_list);
		list_move(&nbl->nbl_lru, &reaplist);
	}
	spin_unlock(&nn->blocked_locks_lock);

	/* Now free them */
	while (!list_empty(&reaplist)) {
		nbl = list_first_entry(&reaplist, struct nfsd4_blocked_lock,
					nbl_lru);
		list_del_init(&nbl->nbl_lru);
		posix_unblock_lock(&nbl->nbl_lock);
		free_blocked_lock(nbl);
	}
}
static int
nfsd4_cb_notify_lock_done(struct nfsd4_callback *cb, struct rpc_task *task)
{
	/*
	 * Since this is just an optimization, we don't try very hard if it
	 * turns out not to succeed. We'll requeue it on NFS4ERR_DELAY, and
	 * just quit trying on anything else.
	 */
	switch (task->tk_status) {
	case -NFS4ERR_DELAY:
		rpc_delay(task, 1 * HZ);
		return 0;
	default:
		return 1;
	}
}

static void
nfsd4_cb_notify_lock_release(struct nfsd4_callback *cb)
{
	struct nfsd4_blocked_lock *nbl = container_of(cb,
						struct nfsd4_blocked_lock, nbl_cb);

	free_blocked_lock(nbl);
}

static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops = {
	.done		= nfsd4_cb_notify_lock_done,
	.release	= nfsd4_cb_notify_lock_release,
};
static inline struct nfs4_stateowner *
nfs4_get_stateowner(struct nfs4_stateowner *sop)
{
	atomic_inc(&sop->so_count);
	return sop;
}

static int
same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner)
{
	return (sop->so_owner.len == owner->len) &&
		0 == memcmp(sop->so_owner.data, owner->data, owner->len);
}

static struct nfs4_openowner *
find_openstateowner_str_locked(unsigned int hashval, struct nfsd4_open *open,
			struct nfs4_client *clp)
{
	struct nfs4_stateowner *so;

	lockdep_assert_held(&clp->cl_lock);

	list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[hashval],
			    so_strhash) {
		if (!so->so_is_open_owner)
			continue;
		if (same_owner_str(so, &open->op_owner))
			return openowner(nfs4_get_stateowner(so));
	}
	return NULL;
}

static struct nfs4_openowner *
find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open,
			struct nfs4_client *clp)
{
	struct nfs4_openowner *oo;

	spin_lock(&clp->cl_lock);
	oo = find_openstateowner_str_locked(hashval, open, clp);
	spin_unlock(&clp->cl_lock);
	return oo;
}

static inline u32
opaque_hashval(const void *ptr, int nbytes)
{
	unsigned char *cptr = (unsigned char *) ptr;
	u32 x = 0;

	while (nbytes--) {
		x *= 37;
		x += *cptr++;
	}
	return x;
}
static void nfsd4_free_file_rcu(struct rcu_head *rcu)
{
	struct nfs4_file *fp = container_of(rcu, struct nfs4_file, fi_rcu);

	kmem_cache_free(file_slab, fp);
}

void
put_nfs4_file(struct nfs4_file *fi)
{
	might_lock(&state_lock);

	if (refcount_dec_and_lock(&fi->fi_ref, &state_lock)) {
		hlist_del_rcu(&fi->fi_hash);
		spin_unlock(&state_lock);
		WARN_ON_ONCE(!list_empty(&fi->fi_clnt_odstate));
		WARN_ON_ONCE(!list_empty(&fi->fi_delegations));
		call_rcu(&fi->fi_rcu, nfsd4_free_file_rcu);
	}
}
static struct file *
__nfs4_get_fd(struct nfs4_file *f, int oflag)
{
	if (f->fi_fds[oflag])
		return get_file(f->fi_fds[oflag]);
	return NULL;
}

static struct file *
find_writeable_file_locked(struct nfs4_file *f)
{
	struct file *ret;

	lockdep_assert_held(&f->fi_lock);

	ret = __nfs4_get_fd(f, O_WRONLY);
	if (!ret)
		ret = __nfs4_get_fd(f, O_RDWR);
	return ret;
}

static struct file *
find_writeable_file(struct nfs4_file *f)
{
	struct file *ret;

	spin_lock(&f->fi_lock);
	ret = find_writeable_file_locked(f);
	spin_unlock(&f->fi_lock);

	return ret;
}

static struct file *find_readable_file_locked(struct nfs4_file *f)
{
	struct file *ret;

	lockdep_assert_held(&f->fi_lock);

	ret = __nfs4_get_fd(f, O_RDONLY);
	if (!ret)
		ret = __nfs4_get_fd(f, O_RDWR);
	return ret;
}

static struct file *
find_readable_file(struct nfs4_file *f)
{
	struct file *ret;

	spin_lock(&f->fi_lock);
	ret = find_readable_file_locked(f);
	spin_unlock(&f->fi_lock);

	return ret;
}

struct file *
find_any_file(struct nfs4_file *f)
{
	struct file *ret;

	spin_lock(&f->fi_lock);
	ret = __nfs4_get_fd(f, O_RDWR);
	if (!ret) {
		ret = __nfs4_get_fd(f, O_WRONLY);
		if (!ret)
			ret = __nfs4_get_fd(f, O_RDONLY);
	}
	spin_unlock(&f->fi_lock);
	return ret;
}
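
/*
 * Note the preference order above: a struct file opened O_RDWR can satisfy
 * any request, so find_any_file() tries it first and then falls back to the
 * write-only and read-only descriptors.
 */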
static atomic_long_t num_delegations;
unsigned long max_delegations;

/*
 * Open owner state (share locks)
 */

/* hash tables for lock and open owners */
#define OWNER_HASH_BITS              8
#define OWNER_HASH_SIZE             (1 << OWNER_HASH_BITS)
#define OWNER_HASH_MASK             (OWNER_HASH_SIZE - 1)

static unsigned int ownerstr_hashval(struct xdr_netobj *ownername)
{
	unsigned int ret;

	ret = opaque_hashval(ownername->data, ownername->len);
	return ret & OWNER_HASH_MASK;
}

/* hash table for nfs4_file */
#define FILE_HASH_BITS                   8
#define FILE_HASH_SIZE                  (1 << FILE_HASH_BITS)

static unsigned int nfsd_fh_hashval(struct knfsd_fh *fh)
{
	return jhash2(fh->fh_base.fh_pad, XDR_QUADLEN(fh->fh_size), 0);
}

static unsigned int file_hashval(struct knfsd_fh *fh)
{
	return nfsd_fh_hashval(fh) & (FILE_HASH_SIZE - 1);
}

static struct hlist_head file_hashtbl[FILE_HASH_SIZE];
static void
__nfs4_file_get_access(struct nfs4_file *fp, u32 access)
{
	lockdep_assert_held(&fp->fi_lock);

	if (access & NFS4_SHARE_ACCESS_WRITE)
		atomic_inc(&fp->fi_access[O_WRONLY]);
	if (access & NFS4_SHARE_ACCESS_READ)
		atomic_inc(&fp->fi_access[O_RDONLY]);
}

static __be32
nfs4_file_get_access(struct nfs4_file *fp, u32 access)
{
	lockdep_assert_held(&fp->fi_lock);

	/* Does this access mode make sense? */
	if (access & ~NFS4_SHARE_ACCESS_BOTH)
		return nfserr_inval;

	/* Does it conflict with a deny mode already set? */
	if ((access & fp->fi_share_deny) != 0)
		return nfserr_share_denied;

	__nfs4_file_get_access(fp, access);
	return nfs_ok;
}

static __be32 nfs4_file_check_deny(struct nfs4_file *fp, u32 deny)
{
	/* Common case is that there is no deny mode. */
	if (deny) {
		/* Does this deny mode make sense? */
		if (deny & ~NFS4_SHARE_DENY_BOTH)
			return nfserr_inval;

		if ((deny & NFS4_SHARE_DENY_READ) &&
		    atomic_read(&fp->fi_access[O_RDONLY]))
			return nfserr_share_denied;

		if ((deny & NFS4_SHARE_DENY_WRITE) &&
		    atomic_read(&fp->fi_access[O_WRONLY]))
			return nfserr_share_denied;
	}
	return nfs_ok;
}

static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
{
	might_lock(&fp->fi_lock);

	if (atomic_dec_and_lock(&fp->fi_access[oflag], &fp->fi_lock)) {
		struct file *f1 = NULL;
		struct file *f2 = NULL;

		swap(f1, fp->fi_fds[oflag]);
		if (atomic_read(&fp->fi_access[1 - oflag]) == 0)
			swap(f2, fp->fi_fds[O_RDWR]);
		spin_unlock(&fp->fi_lock);
		if (f1)
			fput(f1);
		if (f2)
			fput(f2);
	}
}

static void nfs4_file_put_access(struct nfs4_file *fp, u32 access)
{
	WARN_ON_ONCE(access & ~NFS4_SHARE_ACCESS_BOTH);

	if (access & NFS4_SHARE_ACCESS_WRITE)
		__nfs4_file_put_access(fp, O_WRONLY);
	if (access & NFS4_SHARE_ACCESS_READ)
		__nfs4_file_put_access(fp, O_RDONLY);
}
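
/*
 * fi_access[] is indexed by O_RDONLY/O_WRONLY, so, for example, an OPEN
 * with NFS4_SHARE_ACCESS_BOTH bumps both counters and the matching
 * nfs4_file_put_access(fp, NFS4_SHARE_ACCESS_BOTH) drops both again; the
 * shared O_RDWR struct file is only released once both counters hit zero.
 */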
/*
 * Allocate a new open/delegation state counter. This is needed for
 * pNFS for proper return on close semantics.
 *
 * Note that we only allocate it for pNFS-enabled exports, otherwise
 * all pointers to struct nfs4_clnt_odstate are always NULL.
 */
static struct nfs4_clnt_odstate *
alloc_clnt_odstate(struct nfs4_client *clp)
{
	struct nfs4_clnt_odstate *co;

	co = kmem_cache_zalloc(odstate_slab, GFP_KERNEL);
	if (co) {
		co->co_client = clp;
		refcount_set(&co->co_odcount, 1);
	}
	return co;
}

static void
hash_clnt_odstate_locked(struct nfs4_clnt_odstate *co)
{
	struct nfs4_file *fp = co->co_file;

	lockdep_assert_held(&fp->fi_lock);
	list_add(&co->co_perfile, &fp->fi_clnt_odstate);
}

static inline void
get_clnt_odstate(struct nfs4_clnt_odstate *co)
{
	if (co)
		refcount_inc(&co->co_odcount);
}

static void
put_clnt_odstate(struct nfs4_clnt_odstate *co)
{
	struct nfs4_file *fp;

	if (!co)
		return;

	fp = co->co_file;
	if (refcount_dec_and_lock(&co->co_odcount, &fp->fi_lock)) {
		list_del(&co->co_perfile);
		spin_unlock(&fp->fi_lock);

		nfsd4_return_all_file_layouts(co->co_client, fp);
		kmem_cache_free(odstate_slab, co);
	}
}

static struct nfs4_clnt_odstate *
find_or_hash_clnt_odstate(struct nfs4_file *fp, struct nfs4_clnt_odstate *new)
{
	struct nfs4_clnt_odstate *co;
	struct nfs4_client *cl;

	if (!new)
		return NULL;

	cl = new->co_client;

	spin_lock(&fp->fi_lock);
	list_for_each_entry(co, &fp->fi_clnt_odstate, co_perfile) {
		if (co->co_client == cl) {
			get_clnt_odstate(co);
			goto out;
		}
	}
	co = new;
	co->co_file = fp;
	hash_clnt_odstate_locked(new);
out:
	spin_unlock(&fp->fi_lock);
	return co;
}
struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
				  void (*sc_free)(struct nfs4_stid *))
{
	struct nfs4_stid *stid;
	int new_id;

	stid = kmem_cache_zalloc(slab, GFP_KERNEL);
	if (!stid)
		return NULL;

	idr_preload(GFP_KERNEL);
	spin_lock(&cl->cl_lock);
	new_id = idr_alloc_cyclic(&cl->cl_stateids, stid, 0, 0, GFP_NOWAIT);
	spin_unlock(&cl->cl_lock);
	idr_preload_end();
	if (new_id < 0)
		goto out_free;
	stid->sc_free = sc_free;
	stid->sc_client = cl;
	stid->sc_stateid.si_opaque.so_id = new_id;
	stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
	/* Will be incremented before return to client: */
	refcount_set(&stid->sc_count, 1);
	spin_lock_init(&stid->sc_lock);

	/*
	 * It shouldn't be a problem to reuse an opaque stateid value.
	 * I don't think it is for 4.1.  But with 4.0 I worry that, for
	 * example, a stray write retransmission could be accepted by
	 * the server when it should have been rejected.  Therefore,
	 * adopt a trick from the sctp code to attempt to maximize the
	 * amount of time until an id is reused, by ensuring they always
	 * "increase" (mod INT_MAX):
	 */
	return stid;
out_free:
	kmem_cache_free(slab, stid);
	return NULL;
}

static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp)
{
	struct nfs4_stid *stid;

	stid = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_ol_stateid);
	if (!stid)
		return NULL;

	return openlockstateid(stid);
}

static void nfs4_free_deleg(struct nfs4_stid *stid)
{
	kmem_cache_free(deleg_slab, stid);
	atomic_long_dec(&num_delegations);
}
/*
 * When we recall a delegation, we should be careful not to hand it
 * out again straight away.
 * To ensure this we keep a pair of bloom filters ('new' and 'old')
 * in which the filehandles of recalled delegations are "stored".
 * If a filehandle appear in either filter, a delegation is blocked.
 * When a delegation is recalled, the filehandle is stored in the "new"
 * filter.
 * Every 30 seconds we swap the filters and clear the "new" one,
 * unless both are empty of course.
 *
 * Each filter is 256 bits.  We hash the filehandle to 32bit and use the
 * low 3 bytes as hash-table indices.
 *
 * 'blocked_delegations_lock', which is always taken in block_delegations(),
 * is used to manage concurrent access.  Testing does not need the lock
 * except when swapping the two filters.
 */
static DEFINE_SPINLOCK(blocked_delegations_lock);
static struct bloom_pair {
	int	entries, old_entries;
	time_t	swap_time;
	int	new; /* index into 'set' */
	DECLARE_BITMAP(set[2], 256);
} blocked_delegations;
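
/*
 * Worked example: a filehandle hashing to 0x00a1b2c3 sets bits 0xc3, 0xb2
 * and 0xa1 of the "new" 256-bit filter; delegation_blocked() below reports
 * a match only when all three bits are set in the same filter, so a single
 * shared bit does not cause a false positive.
 */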
static int delegation_blocked(struct knfsd_fh *fh)
{
	u32 hash;
	struct bloom_pair *bd = &blocked_delegations;

	if (bd->entries == 0)
		return 0;
	if (seconds_since_boot() - bd->swap_time > 30) {
		spin_lock(&blocked_delegations_lock);
		if (seconds_since_boot() - bd->swap_time > 30) {
			bd->entries -= bd->old_entries;
			bd->old_entries = bd->entries;
			memset(bd->set[bd->new], 0,
			       sizeof(bd->set[0]));
			bd->new = 1-bd->new;
			bd->swap_time = seconds_since_boot();
		}
		spin_unlock(&blocked_delegations_lock);
	}
	hash = jhash(&fh->fh_base, fh->fh_size, 0);
	if (test_bit(hash&255, bd->set[0]) &&
	    test_bit((hash>>8)&255, bd->set[0]) &&
	    test_bit((hash>>16)&255, bd->set[0]))
		return 1;

	if (test_bit(hash&255, bd->set[1]) &&
	    test_bit((hash>>8)&255, bd->set[1]) &&
	    test_bit((hash>>16)&255, bd->set[1]))
		return 1;

	return 0;
}

static void block_delegations(struct knfsd_fh *fh)
{
	u32 hash;
	struct bloom_pair *bd = &blocked_delegations;

	hash = jhash(&fh->fh_base, fh->fh_size, 0);

	spin_lock(&blocked_delegations_lock);
	__set_bit(hash&255, bd->set[bd->new]);
	__set_bit((hash>>8)&255, bd->set[bd->new]);
	__set_bit((hash>>16)&255, bd->set[bd->new]);
	if (bd->entries == 0)
		bd->swap_time = seconds_since_boot();
	bd->entries += 1;
	spin_unlock(&blocked_delegations_lock);
}
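
/*
 * Like any bloom filter, this pair can yield false positives (an unrelated
 * filehandle whose three bits happen to be set), which merely delays a
 * delegation grant; it never yields a false negative inside the roughly
 * 30-60 second window, so a just-recalled delegation is reliably blocked.
 */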
static struct nfs4_delegation *
alloc_init_deleg(struct nfs4_client *clp, struct nfs4_file *fp,
		 struct svc_fh *current_fh,
		 struct nfs4_clnt_odstate *odstate)
{
	struct nfs4_delegation *dp;
	long n;

	dprintk("NFSD alloc_init_deleg\n");
	n = atomic_long_inc_return(&num_delegations);
	if (n < 0 || n > max_delegations)
		goto out_dec;
	if (delegation_blocked(&current_fh->fh_handle))
		goto out_dec;
	dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab, nfs4_free_deleg));
	if (dp == NULL)
		goto out_dec;

	/*
	 * delegation seqid's are never incremented.  The 4.1 special
	 * meaning of seqid 0 isn't meaningful, really, but let's avoid
	 * 0 anyway just for consistency and use 1:
	 */
	dp->dl_stid.sc_stateid.si_generation = 1;
	INIT_LIST_HEAD(&dp->dl_perfile);
	INIT_LIST_HEAD(&dp->dl_perclnt);
	INIT_LIST_HEAD(&dp->dl_recall_lru);
	dp->dl_clnt_odstate = odstate;
	get_clnt_odstate(odstate);
	dp->dl_type = NFS4_OPEN_DELEGATE_READ;
	dp->dl_retries = 1;
	nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client,
		      &nfsd4_cb_recall_ops, NFSPROC4_CLNT_CB_RECALL);
	get_nfs4_file(fp);
	dp->dl_stid.sc_file = fp;
	return dp;
out_dec:
	atomic_long_dec(&num_delegations);
	return NULL;
}
void
nfs4_put_stid(struct nfs4_stid *s)
{
	struct nfs4_file *fp = s->sc_file;
	struct nfs4_client *clp = s->sc_client;

	might_lock(&clp->cl_lock);

	if (!refcount_dec_and_lock(&s->sc_count, &clp->cl_lock)) {
		wake_up_all(&close_wq);
		return;
	}
	idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
	spin_unlock(&clp->cl_lock);
	s->sc_free(s);
	if (fp)
		put_nfs4_file(fp);
}

void
nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid)
{
	stateid_t *src = &stid->sc_stateid;

	spin_lock(&stid->sc_lock);
	if (unlikely(++src->si_generation == 0))
		src->si_generation = 1;
	memcpy(dst, src, sizeof(*dst));
	spin_unlock(&stid->sc_lock);
}
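
/*
 * The generation number deliberately skips 0 on wraparound: in NFSv4.1 a
 * stateid with seqid 0 acts as a wildcard matching any generation, so a
 * stateid handed back to the client must never carry generation 0.
 */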
static void put_deleg_file(struct nfs4_file *fp)
{
	struct file *filp = NULL;

	spin_lock(&fp->fi_lock);
	if (--fp->fi_delegees == 0)
		swap(filp, fp->fi_deleg_file);
	spin_unlock(&fp->fi_lock);

	if (filp)
		fput(filp);
}

static void nfs4_unlock_deleg_lease(struct nfs4_delegation *dp)
{
	struct nfs4_file *fp = dp->dl_stid.sc_file;
	struct file *filp = fp->fi_deleg_file;

	WARN_ON_ONCE(!fp->fi_delegees);

	vfs_setlease(filp, F_UNLCK, NULL, (void **)&dp);
	put_deleg_file(fp);
}

static void destroy_unhashed_deleg(struct nfs4_delegation *dp)
{
	put_clnt_odstate(dp->dl_clnt_odstate);
	nfs4_unlock_deleg_lease(dp);
	nfs4_put_stid(&dp->dl_stid);
}

void nfs4_unhash_stid(struct nfs4_stid *s)
{
	s->sc_type = 0;
}
/**
 * nfs4_delegation_exists - Discover if this delegation already exists
 * @clp:     a pointer to the nfs4_client we're granting a delegation to
 * @fp:      a pointer to the nfs4_file we're granting a delegation on
 *
 * Return:
 *      On success: true iff an existing delegation is found
 */
static bool
nfs4_delegation_exists(struct nfs4_client *clp, struct nfs4_file *fp)
{
	struct nfs4_delegation *searchdp = NULL;
	struct nfs4_client *searchclp = NULL;

	lockdep_assert_held(&state_lock);
	lockdep_assert_held(&fp->fi_lock);

	list_for_each_entry(searchdp, &fp->fi_delegations, dl_perfile) {
		searchclp = searchdp->dl_stid.sc_client;
		if (clp == searchclp)
			return true;
	}
	return false;
}

/**
 * hash_delegation_locked - Add a delegation to the appropriate lists
 * @dp:     a pointer to the nfs4_delegation we are adding.
 * @fp:     a pointer to the nfs4_file we're granting a delegation on
 *
 * Return:
 *      On success: NULL if the delegation was successfully hashed.
 *
 *      On error: -EAGAIN if one was previously granted to this
 *                 nfs4_client for this nfs4_file. Delegation is not hashed.
 *
 */
static int
hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
{
	struct nfs4_client *clp = dp->dl_stid.sc_client;

	lockdep_assert_held(&state_lock);
	lockdep_assert_held(&fp->fi_lock);

	if (nfs4_delegation_exists(clp, fp))
		return -EAGAIN;
	refcount_inc(&dp->dl_stid.sc_count);
	dp->dl_stid.sc_type = NFS4_DELEG_STID;
	list_add(&dp->dl_perfile, &fp->fi_delegations);
	list_add(&dp->dl_perclnt, &clp->cl_delegations);
	return 0;
}
static bool
unhash_delegation_locked(struct nfs4_delegation *dp)
{
	struct nfs4_file *fp = dp->dl_stid.sc_file;

	lockdep_assert_held(&state_lock);

	if (list_empty(&dp->dl_perfile))
		return false;

	dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID;
	/* Ensure that deleg break won't try to requeue it */
	++dp->dl_time;
	spin_lock(&fp->fi_lock);
	list_del_init(&dp->dl_perclnt);
	list_del_init(&dp->dl_recall_lru);
	list_del_init(&dp->dl_perfile);
	spin_unlock(&fp->fi_lock);
	return true;
}

static void destroy_delegation(struct nfs4_delegation *dp)
{
	bool unhashed;

	spin_lock(&state_lock);
	unhashed = unhash_delegation_locked(dp);
	spin_unlock(&state_lock);
	if (unhashed)
		destroy_unhashed_deleg(dp);
}

static void revoke_delegation(struct nfs4_delegation *dp)
{
	struct nfs4_client *clp = dp->dl_stid.sc_client;

	WARN_ON(!list_empty(&dp->dl_recall_lru));

	if (clp->cl_minorversion) {
		dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID;
		refcount_inc(&dp->dl_stid.sc_count);
		spin_lock(&clp->cl_lock);
		list_add(&dp->dl_recall_lru, &clp->cl_revoked);
		spin_unlock(&clp->cl_lock);
	}
	destroy_unhashed_deleg(dp);
}
/*
 * SETCLIENTID state
 */

static unsigned int clientid_hashval(u32 id)
{
	return id & CLIENT_HASH_MASK;
}

static unsigned int clientstr_hashval(const char *name)
{
	return opaque_hashval(name, 8) & CLIENT_HASH_MASK;
}
/*
 * We store the NONE, READ, WRITE, and BOTH bits separately in the
 * st_{access,deny}_bmap field of the stateid, in order to track not
 * only what share bits are currently in force, but also what
 * combinations of share bits previous opens have used.  This allows us
 * to enforce the recommendation of rfc 3530 14.2.19 that the server
 * return an error if the client attempt to downgrade to a combination
 * of share bits not explicable by closing some of its previous opens.
 *
 * XXX: This enforcement is actually incomplete, since we don't keep
 * track of access/deny bit combinations; so, e.g., we allow:
 *
 *	OPEN allow read, deny write
 *	OPEN allow both, deny none
 *	DOWNGRADE allow read, deny none
 *
 * which we should reject.
 */
static unsigned int
bmap_to_share_mode(unsigned long bmap)
{
	int i;
	unsigned int access = 0;

	for (i = 1; i < 4; i++) {
		if (test_bit(i, &bmap))
			access |= i;
	}
	return access;
}
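
/*
 * Example: if previous opens used READ (bit 1) and WRITE (bit 2), the bmap
 * is 0b0110 and bmap_to_share_mode() returns 1|2 == NFS4_SHARE_ACCESS_BOTH,
 * i.e. the union of every share mode any open on this stateid has used.
 */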
/* set share access for a given stateid */
static inline void
set_access(u32 access, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << access;

	WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
	stp->st_access_bmap |= mask;
}

/* clear share access for a given stateid */
static inline void
clear_access(u32 access, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << access;

	WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
	stp->st_access_bmap &= ~mask;
}

/* test whether a given stateid has access */
static inline bool
test_access(u32 access, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << access;

	return (bool)(stp->st_access_bmap & mask);
}

/* set share deny for a given stateid */
static inline void
set_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << deny;

	WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
	stp->st_deny_bmap |= mask;
}

/* clear share deny for a given stateid */
static inline void
clear_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << deny;

	WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
	stp->st_deny_bmap &= ~mask;
}

/* test whether a given stateid is denying specific access */
static inline bool
test_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << deny;

	return (bool)(stp->st_deny_bmap & mask);
}

static int nfs4_access_to_omode(u32 access)
{
	switch (access & NFS4_SHARE_ACCESS_BOTH) {
	case NFS4_SHARE_ACCESS_READ:
		return O_RDONLY;
	case NFS4_SHARE_ACCESS_WRITE:
		return O_WRONLY;
	case NFS4_SHARE_ACCESS_BOTH:
		return O_RDWR;
	}
	WARN_ON_ONCE(1);
	return O_RDONLY;
}
/*
 * A stateid that had a deny mode associated with it is being released
 * or downgraded. Recalculate the deny mode on the file.
 */
static void
recalculate_deny_mode(struct nfs4_file *fp)
{
	struct nfs4_ol_stateid *stp;

	spin_lock(&fp->fi_lock);
	fp->fi_share_deny = 0;
	list_for_each_entry(stp, &fp->fi_stateids, st_perfile)
		fp->fi_share_deny |= bmap_to_share_mode(stp->st_deny_bmap);
	spin_unlock(&fp->fi_lock);
}

static void
reset_union_bmap_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	int i;
	bool change = false;

	for (i = 1; i < 4; i++) {
		if ((i & deny) != i) {
			change = true;
			clear_deny(i, stp);
		}
	}

	/* Recalculate per-file deny mode if there was a change */
	if (change)
		recalculate_deny_mode(stp->st_stid.sc_file);
}

/* release all access and file references for a given stateid */
static void
release_all_access(struct nfs4_ol_stateid *stp)
{
	int i;
	struct nfs4_file *fp = stp->st_stid.sc_file;

	if (fp && stp->st_deny_bmap != 0)
		recalculate_deny_mode(fp);

	for (i = 1; i < 4; i++) {
		if (test_access(i, stp))
			nfs4_file_put_access(stp->st_stid.sc_file, i);
		clear_access(i, stp);
	}
}
static inline void nfs4_free_stateowner(struct nfs4_stateowner *sop)
{
	kfree(sop->so_owner.data);
	sop->so_ops->so_free(sop);
}

static void nfs4_put_stateowner(struct nfs4_stateowner *sop)
{
	struct nfs4_client *clp = sop->so_client;

	might_lock(&clp->cl_lock);

	if (!atomic_dec_and_lock(&sop->so_count, &clp->cl_lock))
		return;
	sop->so_ops->so_unhash(sop);
	spin_unlock(&clp->cl_lock);
	nfs4_free_stateowner(sop);
}
static bool unhash_ol_stateid(struct nfs4_ol_stateid *stp)
{
	struct nfs4_file *fp = stp->st_stid.sc_file;

	lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock);

	if (list_empty(&stp->st_perfile))
		return false;

	spin_lock(&fp->fi_lock);
	list_del_init(&stp->st_perfile);
	spin_unlock(&fp->fi_lock);
	list_del(&stp->st_perstateowner);
	return true;
}

static void nfs4_free_ol_stateid(struct nfs4_stid *stid)
{
	struct nfs4_ol_stateid *stp = openlockstateid(stid);

	put_clnt_odstate(stp->st_clnt_odstate);
	release_all_access(stp);
	if (stp->st_stateowner)
		nfs4_put_stateowner(stp->st_stateowner);
	kmem_cache_free(stateid_slab, stid);
}

static void nfs4_free_lock_stateid(struct nfs4_stid *stid)
{
	struct nfs4_ol_stateid *stp = openlockstateid(stid);
	struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
	struct file *file;

	file = find_any_file(stp->st_stid.sc_file);
	if (file)
		filp_close(file, (fl_owner_t)lo);
	nfs4_free_ol_stateid(stid);
}
/*
 * Put the persistent reference to an already unhashed generic stateid, while
 * holding the cl_lock. If it's the last reference, then put it onto the
 * reaplist for later destruction.
 */
static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,
				       struct list_head *reaplist)
{
	struct nfs4_stid *s = &stp->st_stid;
	struct nfs4_client *clp = s->sc_client;

	lockdep_assert_held(&clp->cl_lock);

	WARN_ON_ONCE(!list_empty(&stp->st_locks));

	if (!refcount_dec_and_test(&s->sc_count)) {
		wake_up_all(&close_wq);
		return;
	}

	idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
	list_add(&stp->st_locks, reaplist);
}

static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
{
	lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);

	list_del_init(&stp->st_locks);
	nfs4_unhash_stid(&stp->st_stid);
	return unhash_ol_stateid(stp);
}
static void release_lock_stateid(struct nfs4_ol_stateid *stp)
{
	struct nfs4_client *clp = stp->st_stid.sc_client;
	bool unhashed;

	spin_lock(&clp->cl_lock);
	unhashed = unhash_lock_stateid(stp);
	spin_unlock(&clp->cl_lock);
	if (unhashed)
		nfs4_put_stid(&stp->st_stid);
}

static void unhash_lockowner_locked(struct nfs4_lockowner *lo)
{
	struct nfs4_client *clp = lo->lo_owner.so_client;

	lockdep_assert_held(&clp->cl_lock);

	list_del_init(&lo->lo_owner.so_strhash);
}
/*
 * Free a list of generic stateids that were collected earlier after being
 * fully unhashed.
 */
static void
free_ol_stateid_reaplist(struct list_head *reaplist)
{
	struct nfs4_ol_stateid *stp;
	struct nfs4_file *fp;

	might_sleep();

	while (!list_empty(reaplist)) {
		stp = list_first_entry(reaplist, struct nfs4_ol_stateid,
				       st_locks);
		list_del(&stp->st_locks);
		fp = stp->st_stid.sc_file;
		stp->st_stid.sc_free(&stp->st_stid);
		if (fp)
			put_nfs4_file(fp);
	}
}

static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp,
				       struct list_head *reaplist)
{
	struct nfs4_ol_stateid *stp;

	lockdep_assert_held(&open_stp->st_stid.sc_client->cl_lock);

	while (!list_empty(&open_stp->st_locks)) {
		stp = list_entry(open_stp->st_locks.next,
				struct nfs4_ol_stateid, st_locks);
		WARN_ON(!unhash_lock_stateid(stp));
		put_ol_stateid_locked(stp, reaplist);
	}
}

static bool unhash_open_stateid(struct nfs4_ol_stateid *stp,
				struct list_head *reaplist)
{
	bool unhashed;

	lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);

	unhashed = unhash_ol_stateid(stp);
	release_open_stateid_locks(stp, reaplist);
	return unhashed;
}

static void release_open_stateid(struct nfs4_ol_stateid *stp)
{
	LIST_HEAD(reaplist);

	spin_lock(&stp->st_stid.sc_client->cl_lock);
	if (unhash_open_stateid(stp, &reaplist))
		put_ol_stateid_locked(stp, &reaplist);
	spin_unlock(&stp->st_stid.sc_client->cl_lock);
	free_ol_stateid_reaplist(&reaplist);
}
static void unhash_openowner_locked(struct nfs4_openowner *oo)
{
	struct nfs4_client *clp = oo->oo_owner.so_client;

	lockdep_assert_held(&clp->cl_lock);

	list_del_init(&oo->oo_owner.so_strhash);
	list_del_init(&oo->oo_perclient);
}

static void release_last_closed_stateid(struct nfs4_openowner *oo)
{
	struct nfsd_net *nn = net_generic(oo->oo_owner.so_client->net,
					  nfsd_net_id);
	struct nfs4_ol_stateid *s;

	spin_lock(&nn->client_lock);
	s = oo->oo_last_closed_stid;
	if (s) {
		list_del_init(&oo->oo_close_lru);
		oo->oo_last_closed_stid = NULL;
	}
	spin_unlock(&nn->client_lock);
	if (s)
		nfs4_put_stid(&s->st_stid);
}

static void release_openowner(struct nfs4_openowner *oo)
{
	struct nfs4_ol_stateid *stp;
	struct nfs4_client *clp = oo->oo_owner.so_client;
	struct list_head reaplist;

	INIT_LIST_HEAD(&reaplist);

	spin_lock(&clp->cl_lock);
	unhash_openowner_locked(oo);
	while (!list_empty(&oo->oo_owner.so_stateids)) {
		stp = list_first_entry(&oo->oo_owner.so_stateids,
				struct nfs4_ol_stateid, st_perstateowner);
		if (unhash_open_stateid(stp, &reaplist))
			put_ol_stateid_locked(stp, &reaplist);
	}
	spin_unlock(&clp->cl_lock);
	free_ol_stateid_reaplist(&reaplist);
	release_last_closed_stateid(oo);
	nfs4_put_stateowner(&oo->oo_owner);
}
static inline int
hash_sessionid(struct nfs4_sessionid *sessionid)
{
	struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid;

	return sid->sequence % SESSION_HASH_SIZE;
}

#ifdef CONFIG_SUNRPC_DEBUG
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
	u32 *ptr = (u32 *)(&sessionid->data[0]);
	dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]);
}
#else
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
}
#endif
/*
 * Bump the seqid on cstate->replay_owner, and clear replay_owner if it
 * won't be used for replay.
 */
void nfsd4_bump_seqid(struct nfsd4_compound_state *cstate, __be32 nfserr)
{
	struct nfs4_stateowner *so = cstate->replay_owner;

	if (nfserr == nfserr_replay_me)
		return;

	if (!seqid_mutating_err(ntohl(nfserr))) {
		nfsd4_cstate_clear_replay(cstate);
		return;
	}
	if (!so)
		return;
	if (so->so_is_open_owner)
		release_last_closed_stateid(openowner(so));
	so->so_seqid++;
}

static void
gen_sessionid(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd4_sessionid *sid;

	sid = (struct nfsd4_sessionid *)ses->se_sessionid.data;
	sid->clientid = clp->cl_clientid;
	sid->sequence = current_sessionid++;
	sid->reserved = 0;
}
/*
 * The protocol defines ca_maxresponssize_cached to include the size of
 * the rpc header, but all we need to cache is the data starting after
 * the end of the initial SEQUENCE operation--the rest we regenerate
 * each time.  Therefore we can advertise a ca_maxresponssize_cached
 * value that is the number of bytes in our cache plus a few additional
 * bytes.  In order to stay on the safe side, and not promise more than
 * we can cache, those additional bytes must be the minimum possible: 24
 * bytes of rpc header (xid through accept state, with AUTH_NULL
 * verifier), 12 for the compound header (with zero-length tag), and 44
 * for the SEQUENCE op response:
 */
#define NFSD_MIN_HDR_SEQ_SZ  (24 + 12 + 44)
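
/*
 * So, for example, a client advertising a maxresp_cached of 1024 bytes
 * requires us to cache at most 1024 - 80 = 944 bytes of encoded reply per
 * slot; slot_bytes() below relies on exactly this arithmetic.
 */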
static void
free_session_slots(struct nfsd4_session *ses)
{
	int i;

	for (i = 0; i < ses->se_fchannel.maxreqs; i++) {
		free_svc_cred(&ses->se_slots[i]->sl_cred);
		kfree(ses->se_slots[i]);
	}
}

/*
 * We don't actually need to cache the rpc and session headers, so we
 * can allocate a little less for each slot:
 */
static inline u32 slot_bytes(struct nfsd4_channel_attrs *ca)
{
	u32 size;

	if (ca->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ)
		size = 0;
	else
		size = ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
	return size + sizeof(struct nfsd4_slot);
}
/*
 * XXX: If we run out of reserved DRC memory we could (up to a point)
 * re-negotiate active sessions and reduce their slot usage to make
 * room for new connections. For now we just fail the create session.
 */
static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca)
{
	u32 slotsize = slot_bytes(ca);
	u32 num = ca->maxreqs;
	unsigned long avail;

	spin_lock(&nfsd_drc_lock);
	avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION,
		    nfsd_drc_max_mem - nfsd_drc_mem_used);
	/*
	 * Never use more than a third of the remaining memory,
	 * unless it's the only way to give this client a slot:
	 */
	avail = clamp_t(int, avail, slotsize, avail/3);
	num = min_t(int, num, avail / slotsize);
	nfsd_drc_mem_used += num * slotsize;
	spin_unlock(&nfsd_drc_lock);

	return num;
}
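
/*
 * Example (assuming NFSD_MAX_MEM_PER_SESSION doesn't cap it first): with
 * 300k of DRC memory remaining and a 2k slot size, avail is clamped to a
 * third (100k) and the session gets at most 50 slots; a client that asked
 * for fewer slots than that simply keeps its requested maxreqs.
 */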
static void nfsd4_put_drc_mem(struct nfsd4_channel_attrs *ca)
{
	int slotsize = slot_bytes(ca);

	spin_lock(&nfsd_drc_lock);
	nfsd_drc_mem_used -= slotsize * ca->maxreqs;
	spin_unlock(&nfsd_drc_lock);
}

static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs,
					   struct nfsd4_channel_attrs *battrs)
{
	int numslots = fattrs->maxreqs;
	int slotsize = slot_bytes(fattrs);
	struct nfsd4_session *new;
	int mem, i;

	BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot *)
			+ sizeof(struct nfsd4_session) > PAGE_SIZE);
	mem = numslots * sizeof(struct nfsd4_slot *);

	new = kzalloc(sizeof(*new) + mem, GFP_KERNEL);
	if (!new)
		return NULL;
	/* allocate each struct nfsd4_slot and data cache in one piece */
	for (i = 0; i < numslots; i++) {
		new->se_slots[i] = kzalloc(slotsize, GFP_KERNEL);
		if (!new->se_slots[i])
			goto out_free;
	}

	memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs));
	memcpy(&new->se_bchannel, battrs, sizeof(struct nfsd4_channel_attrs));

	return new;
out_free:
	while (i--)
		kfree(new->se_slots[i]);
	kfree(new);
	return NULL;
}
static void free_conn(struct nfsd4_conn *c)
{
	svc_xprt_put(c->cn_xprt);
	kfree(c);
}

static void nfsd4_conn_lost(struct svc_xpt_user *u)
{
	struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user);
	struct nfs4_client *clp = c->cn_session->se_client;

	spin_lock(&clp->cl_lock);
	if (!list_empty(&c->cn_persession)) {
		list_del(&c->cn_persession);
		free_conn(c);
	}
	nfsd4_probe_callback(clp);
	spin_unlock(&clp->cl_lock);
}

static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
{
	struct nfsd4_conn *conn;

	conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL);
	if (!conn)
		return NULL;
	svc_xprt_get(rqstp->rq_xprt);
	conn->cn_xprt = rqstp->rq_xprt;
	conn->cn_flags = flags;
	INIT_LIST_HEAD(&conn->cn_xpt_user.list);
	return conn;
}

static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	conn->cn_session = ses;
	list_add(&conn->cn_persession, &ses->se_conns);
}

static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;

	spin_lock(&clp->cl_lock);
	__nfsd4_hash_conn(conn, ses);
	spin_unlock(&clp->cl_lock);
}

static int nfsd4_register_conn(struct nfsd4_conn *conn)
{
	conn->cn_xpt_user.callback = nfsd4_conn_lost;
	return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
}
static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	int ret;

	nfsd4_hash_conn(conn, ses);
	ret = nfsd4_register_conn(conn);
	if (ret)
		/* oops; xprt is already down: */
		nfsd4_conn_lost(&conn->cn_xpt_user);
	/* We may have gained or lost a callback channel: */
	nfsd4_probe_callback_sync(ses->se_client);
}

static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses)
{
	u32 dir = NFS4_CDFC4_FORE;

	if (cses->flags & SESSION4_BACK_CHAN)
		dir |= NFS4_CDFC4_BACK;
	return alloc_conn(rqstp, dir);
}

/* must be called under client_lock */
static void nfsd4_del_conns(struct nfsd4_session *s)
{
	struct nfs4_client *clp = s->se_client;
	struct nfsd4_conn *c;

	spin_lock(&clp->cl_lock);
	while (!list_empty(&s->se_conns)) {
		c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession);
		list_del_init(&c->cn_persession);
		spin_unlock(&clp->cl_lock);

		unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user);
		free_conn(c);

		spin_lock(&clp->cl_lock);
	}
	spin_unlock(&clp->cl_lock);
}

static void __free_session(struct nfsd4_session *ses)
{
	free_session_slots(ses);
	kfree(ses);
}

static void free_session(struct nfsd4_session *ses)
{
	nfsd4_del_conns(ses);
	nfsd4_put_drc_mem(&ses->se_fchannel);
	__free_session(ses);
}
static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses)
{
	int idx;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	new->se_client = clp;
	gen_sessionid(new);

	INIT_LIST_HEAD(&new->se_conns);

	new->se_cb_seq_nr = 1;
	new->se_flags = cses->flags;
	new->se_cb_prog = cses->callback_prog;
	new->se_cb_sec = cses->cb_sec;
	atomic_set(&new->se_ref, 0);
	idx = hash_sessionid(&new->se_sessionid);
	list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]);
	spin_lock(&clp->cl_lock);
	list_add(&new->se_perclnt, &clp->cl_sessions);
	spin_unlock(&clp->cl_lock);

	{
		struct sockaddr *sa = svc_addr(rqstp);
		/*
		 * This is a little silly; with sessions there's no real
		 * use for the callback address.  Use the peer address
		 * as a reasonable default for now, but consider fixing
		 * the rpc client not to require an address in the
		 * future:
		 */
		rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
		clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
	}
}
/* caller must hold client_lock */
static struct nfsd4_session *
__find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net)
{
	struct nfsd4_session *elem;
	int idx;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	dump_sessionid(__func__, sessionid);
	idx = hash_sessionid(sessionid);
	/* Search in the appropriate list */
	list_for_each_entry(elem, &nn->sessionid_hashtbl[idx], se_hash) {
		if (!memcmp(elem->se_sessionid.data, sessionid->data,
			    NFS4_MAX_SESSIONID_LEN)) {
			return elem;
		}
	}

	dprintk("%s: session not found\n", __func__);
	return NULL;
}

static struct nfsd4_session *
find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net,
		__be32 *ret)
{
	struct nfsd4_session *session;
	__be32 status = nfserr_badsession;

	session = __find_in_sessionid_hashtbl(sessionid, net);
	if (!session)
		goto out;
	status = nfsd4_get_session_locked(session);
	if (status)
		session = NULL;
out:
	*ret = status;
	return session;
}

/* caller must hold client_lock */
static void
unhash_session(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	list_del(&ses->se_hash);
	spin_lock(&ses->se_client->cl_lock);
	list_del(&ses->se_perclnt);
	spin_unlock(&ses->se_client->cl_lock);
}
/* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
static int
STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn)
{
	/*
	 * We're assuming the clid was not given out from a boot
	 * precisely 2^32 (about 136 years) before this one.  That seems
	 * a safe assumption:
	 */
	if (clid->cl_boot == (u32)nn->boot_time)
		return 0;
	dprintk("NFSD stale clientid (%08x/%08x) boot_time %08lx\n",
		clid->cl_boot, clid->cl_id, nn->boot_time);
	return 1;
}
/*
 * XXX Should we use a slab cache ?
 * This type of memory management is somewhat inefficient, but we use it
 * anyway since SETCLIENTID is not a common operation.
 */
static struct nfs4_client *alloc_client(struct xdr_netobj name)
{
	struct nfs4_client *clp;
	int i;

	clp = kmem_cache_zalloc(client_slab, GFP_KERNEL);
	if (clp == NULL)
		return NULL;
	clp->cl_name.data = kmemdup(name.data, name.len, GFP_KERNEL);
	if (clp->cl_name.data == NULL)
		goto err_no_name;
	clp->cl_ownerstr_hashtbl = kmalloc_array(OWNER_HASH_SIZE,
						 sizeof(struct list_head),
						 GFP_KERNEL);
	if (!clp->cl_ownerstr_hashtbl)
		goto err_no_hashtbl;
	for (i = 0; i < OWNER_HASH_SIZE; i++)
		INIT_LIST_HEAD(&clp->cl_ownerstr_hashtbl[i]);
	clp->cl_name.len = name.len;
	INIT_LIST_HEAD(&clp->cl_sessions);
	idr_init(&clp->cl_stateids);
	atomic_set(&clp->cl_refcount, 0);
	clp->cl_cb_state = NFSD4_CB_UNKNOWN;
	INIT_LIST_HEAD(&clp->cl_idhash);
	INIT_LIST_HEAD(&clp->cl_openowners);
	INIT_LIST_HEAD(&clp->cl_delegations);
	INIT_LIST_HEAD(&clp->cl_lru);
	INIT_LIST_HEAD(&clp->cl_revoked);
#ifdef CONFIG_NFSD_PNFS
	INIT_LIST_HEAD(&clp->cl_lo_states);
#endif
	spin_lock_init(&clp->cl_lock);
	rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
	return clp;
err_no_hashtbl:
	kfree(clp->cl_name.data);
err_no_name:
	kmem_cache_free(client_slab, clp);
	return NULL;
}
static void
free_client(struct nfs4_client *clp)
{
	while (!list_empty(&clp->cl_sessions)) {
		struct nfsd4_session *ses;
		ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
				se_perclnt);
		list_del(&ses->se_perclnt);
		WARN_ON_ONCE(atomic_read(&ses->se_ref));
		free_session(ses);
	}
	rpc_destroy_wait_queue(&clp->cl_cb_waitq);
	free_svc_cred(&clp->cl_cred);
	kfree(clp->cl_ownerstr_hashtbl);
	kfree(clp->cl_name.data);
	idr_destroy(&clp->cl_stateids);
	kmem_cache_free(client_slab, clp);
}
/* must be called under the client_lock */
static void
unhash_client_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
	struct nfsd4_session *ses;

	lockdep_assert_held(&nn->client_lock);

	/* Mark the client as expired! */
	clp->cl_time = 0;
	/* Make it invisible */
	if (!list_empty(&clp->cl_idhash)) {
		list_del_init(&clp->cl_idhash);
		if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
			rb_erase(&clp->cl_namenode, &nn->conf_name_tree);
		else
			rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
	}
	list_del_init(&clp->cl_lru);
	spin_lock(&clp->cl_lock);
	list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
		list_del_init(&ses->se_hash);
	spin_unlock(&clp->cl_lock);
}

static void
unhash_client(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	spin_lock(&nn->client_lock);
	unhash_client_locked(clp);
	spin_unlock(&nn->client_lock);
}

static __be32 mark_client_expired_locked(struct nfs4_client *clp)
{
	if (atomic_read(&clp->cl_refcount))
		return nfserr_jukebox;
	unhash_client_locked(clp);
	return nfs_ok;
}
static void
__destroy_client(struct nfs4_client *clp)
{
	int i;
	struct nfs4_openowner *oo;
	struct nfs4_delegation *dp;
	struct list_head reaplist;

	INIT_LIST_HEAD(&reaplist);
	spin_lock(&state_lock);
	while (!list_empty(&clp->cl_delegations)) {
		dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
		WARN_ON(!unhash_delegation_locked(dp));
		list_add(&dp->dl_recall_lru, &reaplist);
	}
	spin_unlock(&state_lock);
	while (!list_empty(&reaplist)) {
		dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
		list_del_init(&dp->dl_recall_lru);
		destroy_unhashed_deleg(dp);
	}
	while (!list_empty(&clp->cl_revoked)) {
		dp = list_entry(clp->cl_revoked.next, struct nfs4_delegation, dl_recall_lru);
		list_del_init(&dp->dl_recall_lru);
		nfs4_put_stid(&dp->dl_stid);
	}
	while (!list_empty(&clp->cl_openowners)) {
		oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
		nfs4_get_stateowner(&oo->oo_owner);
		release_openowner(oo);
	}
	for (i = 0; i < OWNER_HASH_SIZE; i++) {
		struct nfs4_stateowner *so, *tmp;

		list_for_each_entry_safe(so, tmp, &clp->cl_ownerstr_hashtbl[i],
					 so_strhash) {
			/* Should be no openowners at this point */
			WARN_ON_ONCE(so->so_is_open_owner);
			remove_blocked_locks(lockowner(so));
		}
	}
	nfsd4_return_all_client_layouts(clp);
	nfsd4_shutdown_callback(clp);
	if (clp->cl_cb_conn.cb_xprt)
		svc_xprt_put(clp->cl_cb_conn.cb_xprt);
	free_client(clp);
}

static void
destroy_client(struct nfs4_client *clp)
{
	unhash_client(clp);
	__destroy_client(clp);
}

static void expire_client(struct nfs4_client *clp)
{
	unhash_client(clp);
	nfsd4_client_record_remove(clp);
	__destroy_client(clp);
}
static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
{
	memcpy(target->cl_verifier.data, source->data,
			sizeof(target->cl_verifier.data));
}

static void copy_clid(struct nfs4_client *target, struct nfs4_client *source)
{
	target->cl_clientid.cl_boot = source->cl_clientid.cl_boot;
	target->cl_clientid.cl_id = source->cl_clientid.cl_id;
}

static int copy_cred(struct svc_cred *target, struct svc_cred *source)
{
	target->cr_principal = kstrdup(source->cr_principal, GFP_KERNEL);
	target->cr_raw_principal = kstrdup(source->cr_raw_principal,
								GFP_KERNEL);
	target->cr_targ_princ = kstrdup(source->cr_targ_princ, GFP_KERNEL);
	if ((source->cr_principal && !target->cr_principal) ||
	    (source->cr_raw_principal && !target->cr_raw_principal) ||
	    (source->cr_targ_princ && !target->cr_targ_princ))
		return -ENOMEM;

	target->cr_flavor = source->cr_flavor;
	target->cr_uid = source->cr_uid;
	target->cr_gid = source->cr_gid;
	target->cr_group_info = source->cr_group_info;
	get_group_info(target->cr_group_info);
	target->cr_gss_mech = source->cr_gss_mech;
	if (source->cr_gss_mech)
		gss_mech_get(source->cr_gss_mech);
	return 0;
}
static int
compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2)
{
	if (o1->len < o2->len)
		return -1;
	if (o1->len > o2->len)
		return 1;
	return memcmp(o1->data, o2->data, o1->len);
}

static int same_name(const char *n1, const char *n2)
{
	return 0 == memcmp(n1, n2, HEXDIR_LEN);
}

static int
same_verf(nfs4_verifier *v1, nfs4_verifier *v2)
{
	return 0 == memcmp(v1->data, v2->data, sizeof(v1->data));
}

static int
same_clid(clientid_t *cl1, clientid_t *cl2)
{
	return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id);
}

static bool groups_equal(struct group_info *g1, struct group_info *g2)
{
	int i;

	if (g1->ngroups != g2->ngroups)
		return false;
	for (i = 0; i < g1->ngroups; i++)
		if (!gid_eq(g1->gid[i], g2->gid[i]))
			return false;
	return true;
}
/*
 * RFC 3530 language requires clid_inuse be returned when the
 * "principal" associated with a requests differs from that previously
 * used.  We use uid, gid's, and gss principal string as our best
 * approximation.  We also don't want to allow non-gss use of a client
 * established using gss: in theory cr_principal should catch that
 * change, but in practice cr_principal can be null even in the gss case
 * since gssd doesn't always pass down a principal string.
 */
static bool is_gss_cred(struct svc_cred *cr)
{
	/* Is cr_flavor one of the gss "pseudoflavors"?: */
	return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR);
}

static bool
same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
{
	if ((is_gss_cred(cr1) != is_gss_cred(cr2))
		|| (!uid_eq(cr1->cr_uid, cr2->cr_uid))
		|| (!gid_eq(cr1->cr_gid, cr2->cr_gid))
		|| !groups_equal(cr1->cr_group_info, cr2->cr_group_info))
		return false;
	/* XXX: check that cr_targ_princ fields match ? */
	if (cr1->cr_principal == cr2->cr_principal)
		return true;
	if (!cr1->cr_principal || !cr2->cr_principal)
		return false;
	return 0 == strcmp(cr1->cr_principal, cr2->cr_principal);
}
static bool svc_rqst_integrity_protected(struct svc_rqst *rqstp)
{
	struct svc_cred *cr = &rqstp->rq_cred;
	u32 service;

	if (!cr->cr_gss_mech)
		return false;
	service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor);
	return service == RPC_GSS_SVC_INTEGRITY ||
	       service == RPC_GSS_SVC_PRIVACY;
}

bool nfsd4_mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp)
{
	struct svc_cred *cr = &rqstp->rq_cred;

	if (!cl->cl_mach_cred)
		return true;
	if (cl->cl_cred.cr_gss_mech != cr->cr_gss_mech)
		return false;
	if (!svc_rqst_integrity_protected(rqstp))
		return false;
	if (cl->cl_cred.cr_raw_principal)
		return 0 == strcmp(cl->cl_cred.cr_raw_principal,
						cr->cr_raw_principal);
	if (!cr->cr_principal)
		return false;
	return 0 == strcmp(cl->cl_cred.cr_principal, cr->cr_principal);
}
static void gen_confirm(struct nfs4_client *clp, struct nfsd_net *nn)
{
	__be32 verf[2];

	/*
	 * This is opaque to client, so no need to byte-swap. Use
	 * __force to keep sparse happy
	 */
	verf[0] = (__force __be32)get_seconds();
	verf[1] = (__force __be32)nn->clverifier_counter++;
	memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data));
}

static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn)
{
	clp->cl_clientid.cl_boot = nn->boot_time;
	clp->cl_clientid.cl_id = nn->clientid_counter++;
	gen_confirm(clp, nn);
}
static struct nfs4_stid *
find_stateid_locked(struct nfs4_client *cl, stateid_t *t)
{
	struct nfs4_stid *ret;

	ret = idr_find(&cl->cl_stateids, t->si_opaque.so_id);
	if (!ret || !ret->sc_type)
		return NULL;
	return ret;
}

static struct nfs4_stid *
find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask)
{
	struct nfs4_stid *s;

	spin_lock(&cl->cl_lock);
	s = find_stateid_locked(cl, t);
	if (s != NULL) {
		if (typemask & s->sc_type)
			refcount_inc(&s->sc_count);
		else
			s = NULL;
	}
	spin_unlock(&cl->cl_lock);
	return s;
}
static struct nfs4_client *create_client(struct xdr_netobj name,
		struct svc_rqst *rqstp, nfs4_verifier *verf)
{
	struct nfs4_client *clp;
	struct sockaddr *sa = svc_addr(rqstp);
	int ret;
	struct net *net = SVC_NET(rqstp);

	clp = alloc_client(name);
	if (clp == NULL)
		return NULL;

	ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
	if (ret) {
		free_client(clp);
		return NULL;
	}
	nfsd4_init_cb(&clp->cl_cb_null, clp, NULL, NFSPROC4_CLNT_CB_NULL);
	clp->cl_time = get_seconds();
	clear_bit(0, &clp->cl_cb_slot_busy);
	copy_verf(clp, verf);
	rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa);
	clp->cl_cb_session = NULL;
	clp->net = net;
	return clp;
}
static void
add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	struct nfs4_client *clp;

	while (*new) {
		clp = rb_entry(*new, struct nfs4_client, cl_namenode);
		parent = *new;

		if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	rb_link_node(&new_clp->cl_namenode, parent, new);
	rb_insert_color(&new_clp->cl_namenode, root);
}
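
/*
 * The name trees are ordered by compare_blob(), which sorts first by
 * opaque-name length and then bytewise, so lookup in
 * find_clp_in_name_tree() below must (and does) use the same comparison.
 */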
static struct nfs4_client *
find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root)
{
	int cmp;
	struct rb_node *node = root->rb_node;
	struct nfs4_client *clp;

	while (node) {
		clp = rb_entry(node, struct nfs4_client, cl_namenode);
		cmp = compare_blob(&clp->cl_name, name);
		if (cmp > 0)
			node = node->rb_left;
		else if (cmp < 0)
			node = node->rb_right;
		else
			return clp;
	}
	return NULL;
}

static void
add_to_unconfirmed(struct nfs4_client *clp)
{
	unsigned int idhashval;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
	add_clp_to_name_tree(clp, &nn->unconf_name_tree);
	idhashval = clientid_hashval(clp->cl_clientid.cl_id);
	list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]);
	renew_client_locked(clp);
}
static void
move_to_confirmed(struct nfs4_client *clp)
{
	unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id);
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	dprintk("NFSD: move_to_confirm nfs4_client %p\n", clp);
	list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]);
	rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
	add_clp_to_name_tree(clp, &nn->conf_name_tree);
	set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
	renew_client_locked(clp);
}

static struct nfs4_client *
find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions)
{
	struct nfs4_client *clp;
	unsigned int idhashval = clientid_hashval(clid->cl_id);

	list_for_each_entry(clp, &tbl[idhashval], cl_idhash) {
		if (same_clid(&clp->cl_clientid, clid)) {
			if ((bool)clp->cl_minorversion != sessions)
				return NULL;
			renew_client_locked(clp);
			return clp;
		}
	}
	return NULL;
}
*
2264 find_confirmed_client(clientid_t
*clid
, bool sessions
, struct nfsd_net
*nn
)
2266 struct list_head
*tbl
= nn
->conf_id_hashtbl
;
2268 lockdep_assert_held(&nn
->client_lock
);
2269 return find_client_in_id_table(tbl
, clid
, sessions
);
2272 static struct nfs4_client
*
2273 find_unconfirmed_client(clientid_t
*clid
, bool sessions
, struct nfsd_net
*nn
)
2275 struct list_head
*tbl
= nn
->unconf_id_hashtbl
;
2277 lockdep_assert_held(&nn
->client_lock
);
2278 return find_client_in_id_table(tbl
, clid
, sessions
);
2281 static bool clp_used_exchangeid(struct nfs4_client
*clp
)
2283 return clp
->cl_exchange_flags
!= 0;
2286 static struct nfs4_client
*
2287 find_confirmed_client_by_name(struct xdr_netobj
*name
, struct nfsd_net
*nn
)
2289 lockdep_assert_held(&nn
->client_lock
);
2290 return find_clp_in_name_tree(name
, &nn
->conf_name_tree
);
2293 static struct nfs4_client
*
2294 find_unconfirmed_client_by_name(struct xdr_netobj
*name
, struct nfsd_net
*nn
)
2296 lockdep_assert_held(&nn
->client_lock
);
2297 return find_clp_in_name_tree(name
, &nn
->unconf_name_tree
);
2301 gen_callback(struct nfs4_client
*clp
, struct nfsd4_setclientid
*se
, struct svc_rqst
*rqstp
)
2303 struct nfs4_cb_conn
*conn
= &clp
->cl_cb_conn
;
2304 struct sockaddr
*sa
= svc_addr(rqstp
);
2305 u32 scopeid
= rpc_get_scope_id(sa
);
2306 unsigned short expected_family
;
2308 /* Currently, we only support tcp and tcp6 for the callback channel */
2309 if (se
->se_callback_netid_len
== 3 &&
2310 !memcmp(se
->se_callback_netid_val
, "tcp", 3))
2311 expected_family
= AF_INET
;
2312 else if (se
->se_callback_netid_len
== 4 &&
2313 !memcmp(se
->se_callback_netid_val
, "tcp6", 4))
2314 expected_family
= AF_INET6
;
2318 conn
->cb_addrlen
= rpc_uaddr2sockaddr(clp
->net
, se
->se_callback_addr_val
,
2319 se
->se_callback_addr_len
,
2320 (struct sockaddr
*)&conn
->cb_addr
,
2321 sizeof(conn
->cb_addr
));
2323 if (!conn
->cb_addrlen
|| conn
->cb_addr
.ss_family
!= expected_family
)
2326 if (conn
->cb_addr
.ss_family
== AF_INET6
)
2327 ((struct sockaddr_in6
*)&conn
->cb_addr
)->sin6_scope_id
= scopeid
;
2329 conn
->cb_prog
= se
->se_callback_prog
;
2330 conn
->cb_ident
= se
->se_callback_ident
;
2331 memcpy(&conn
->cb_saddr
, &rqstp
->rq_daddr
, rqstp
->rq_daddrlen
);
2334 conn
->cb_addr
.ss_family
= AF_UNSPEC
;
2335 conn
->cb_addrlen
= 0;
2336 dprintk("NFSD: this client (clientid %08x/%08x) "
2337 "will not receive delegations\n",
2338 clp
->cl_clientid
.cl_boot
, clp
->cl_clientid
.cl_id
);
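/*
 * Worked example for the parsing above (illustrative values, not taken
 * from a real client): a SETCLIENTID carrying netid "tcp" and universal
 * address "192.0.2.5.8.1" yields an AF_INET callback address of
 * 192.0.2.5, port 8 * 256 + 1 = 2049. A netid whose family doesn't
 * match the parsed address (say "tcp" with an IPv6 uaddr) takes the
 * out_err path, and the client simply gets no delegations.
 */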
/*
 * Cache a reply. nfsd4_check_resp_size() has bounded the cache size.
 */
static void
nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
{
	struct xdr_buf *buf = resp->xdr.buf;
	struct nfsd4_slot *slot = resp->cstate.slot;
	unsigned int base;

	dprintk("--> %s slot %p\n", __func__, slot);

	slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
	slot->sl_opcnt = resp->opcnt;
	slot->sl_status = resp->cstate.status;
	free_svc_cred(&slot->sl_cred);
	copy_cred(&slot->sl_cred, &resp->rqstp->rq_cred);

	if (!nfsd4_cache_this(resp)) {
		slot->sl_flags &= ~NFSD4_SLOT_CACHED;
		return;
	}
	slot->sl_flags |= NFSD4_SLOT_CACHED;

	base = resp->cstate.data_offset;
	slot->sl_datalen = buf->len - base;
	if (read_bytes_from_xdr_buf(buf, base, slot->sl_data, slot->sl_datalen))
		WARN(1, "%s: sessions DRC could not cache compound\n",
		     __func__);
}
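/*
 * Sketch of what the slot must hold for the replay path (inferred from
 * the code above): sl_data is the raw XDR of the reply starting at
 * cstate.data_offset, i.e. everything encoded after the SEQUENCE op,
 * while sl_status and sl_opcnt are enough to rebuild the surrounding
 * COMPOUND header when the reply is resent.
 */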
/*
 * Encode the replay sequence operation from the slot values.
 * If cachethis is FALSE encode the uncached rep error on the next
 * operation which sets resp->p and increments resp->opcnt for
 * nfs4svc_encode_compoundres.
 */
static __be32
nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
			  struct nfsd4_compoundres *resp)
{
	struct nfsd4_op *op;
	struct nfsd4_slot *slot = resp->cstate.slot;

	/* Encode the replayed sequence operation */
	op = &args->ops[resp->opcnt - 1];
	nfsd4_encode_operation(resp, op);

	if (slot->sl_flags & NFSD4_SLOT_CACHED)
		return op->status;
	if (args->opcnt == 1) {
		/*
		 * The original operation wasn't a solo sequence--we
		 * always cache those--so this retry must not match the
		 * original:
		 */
		op->status = nfserr_seq_false_retry;
	} else {
		op = &args->ops[resp->opcnt++];
		op->status = nfserr_retry_uncached_rep;
		nfsd4_encode_operation(resp, op);
	}
	return op->status;
}

/*
 * The sequence operation is not cached because we can use the slot and
 * session values.
 */
static __be32
nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
			 struct nfsd4_sequence *seq)
{
	struct nfsd4_slot *slot = resp->cstate.slot;
	struct xdr_stream *xdr = &resp->xdr;
	__be32 *p;
	__be32 status;

	dprintk("--> %s slot %p\n", __func__, slot);

	status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp);
	if (status)
		return status;

	p = xdr_reserve_space(xdr, slot->sl_datalen);
	if (!p) {
		WARN_ON_ONCE(1);
		return nfserr_serverfault;
	}
	xdr_encode_opaque_fixed(p, slot->sl_data, slot->sl_datalen);
	xdr_commit_encode(xdr);

	resp->opcnt = slot->sl_opcnt;
	return slot->sl_status;
}

/*
 * Set the exchange_id flags returned by the server.
 */
static void
nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid)
{
#ifdef CONFIG_NFSD_PNFS
	new->cl_exchange_flags |= EXCHGID4_FLAG_USE_PNFS_MDS;
#else
	new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS;
#endif

	/* Referrals are supported, Migration is not. */
	new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER;

	/* set the wire flags to return to client. */
	clid->flags = new->cl_exchange_flags;
}

static bool client_has_openowners(struct nfs4_client *clp)
{
	struct nfs4_openowner *oo;

	list_for_each_entry(oo, &clp->cl_openowners, oo_perclient) {
		if (!list_empty(&oo->oo_owner.so_stateids))
			return true;
	}
	return false;
}

static bool client_has_state(struct nfs4_client *clp)
{
	return client_has_openowners(clp)
#ifdef CONFIG_NFSD_PNFS
		|| !list_empty(&clp->cl_lo_states)
#endif
		|| !list_empty(&clp->cl_delegations)
		|| !list_empty(&clp->cl_sessions);
}

__be32
nfsd4_exchange_id(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	struct nfsd4_exchange_id *exid = &u->exchange_id;
	struct nfs4_client *conf, *new;
	struct nfs4_client *unconf = NULL;
	__be32 status;
	char addr_str[INET6_ADDRSTRLEN];
	nfs4_verifier verf = exid->verifier;
	struct sockaddr *sa = svc_addr(rqstp);
	bool update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	rpc_ntop(sa, addr_str, sizeof(addr_str));
	dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p "
		"ip_addr=%s flags %x, spa_how %d\n",
		__func__, rqstp, exid, exid->clname.len, exid->clname.data,
		addr_str, exid->flags, exid->spa_how);

	if (exid->flags & ~EXCHGID4_FLAG_MASK_A)
		return nfserr_inval;

	new = create_client(exid->clname, rqstp, &verf);
	if (new == NULL)
		return nfserr_jukebox;

	switch (exid->spa_how) {
	case SP4_MACH_CRED:
		exid->spo_must_enforce[0] = 0;
		exid->spo_must_enforce[1] = (
			1 << (OP_BIND_CONN_TO_SESSION - 32) |
			1 << (OP_EXCHANGE_ID - 32) |
			1 << (OP_CREATE_SESSION - 32) |
			1 << (OP_DESTROY_SESSION - 32) |
			1 << (OP_DESTROY_CLIENTID - 32));

		exid->spo_must_allow[0] &= (1 << (OP_CLOSE) |
					1 << (OP_OPEN_DOWNGRADE) |
					1 << (OP_LOCKU) |
					1 << (OP_DELEGRETURN));

		exid->spo_must_allow[1] &= (
					1 << (OP_TEST_STATEID - 32) |
					1 << (OP_FREE_STATEID - 32));
		if (!svc_rqst_integrity_protected(rqstp)) {
			status = nfserr_inval;
			goto out_nolock;
		}
		/*
		 * Sometimes userspace doesn't give us a principal.
		 * Which is a bug, really.  Anyway, we can't enforce
		 * MACH_CRED in that case, better to give up now:
		 */
		if (!new->cl_cred.cr_principal &&
					!new->cl_cred.cr_raw_principal) {
			status = nfserr_serverfault;
			goto out_nolock;
		}
		new->cl_mach_cred = true;
	case SP4_NONE:
		break;
	default:				/* checked by xdr code */
		WARN_ON_ONCE(1);
	case SP4_SSV:
		status = nfserr_encr_alg_unsupp;
		goto out_nolock;
	}

	/* Cases below refer to rfc 5661 section 18.35.4: */
	spin_lock(&nn->client_lock);
	conf = find_confirmed_client_by_name(&exid->clname, nn);
	if (conf) {
		bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred);
		bool verfs_match = same_verf(&verf, &conf->cl_verifier);

		if (update) {
			if (!clp_used_exchangeid(conf)) { /* buggy client */
				status = nfserr_inval;
				goto out;
			}
			if (!nfsd4_mach_creds_match(conf, rqstp)) {
				status = nfserr_wrong_cred;
				goto out;
			}
			if (!creds_match) { /* case 9 */
				status = nfserr_perm;
				goto out;
			}
			if (!verfs_match) { /* case 8 */
				status = nfserr_not_same;
				goto out;
			}
			/* case 6 */
			exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;
			goto out_copy;
		}
		if (!creds_match) { /* case 3 */
			if (client_has_state(conf)) {
				status = nfserr_clid_inuse;
				goto out;
			}
			goto out_new;
		}
		if (verfs_match) { /* case 2 */
			conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
			goto out_copy;
		}
		/* case 5, client reboot */
		conf = NULL;
		goto out_new;
	}

	if (update) { /* case 7 */
		status = nfserr_noent;
		goto out;
	}

	unconf = find_unconfirmed_client_by_name(&exid->clname, nn);
	if (unconf) /* case 4, possible retry or client restart */
		unhash_client_locked(unconf);

	/* case 1 (normal case) */
out_new:
	if (conf) {
		status = mark_client_expired_locked(conf);
		if (status)
			goto out;
	}
	new->cl_minorversion = cstate->minorversion;
	new->cl_spo_must_allow.u.words[0] = exid->spo_must_allow[0];
	new->cl_spo_must_allow.u.words[1] = exid->spo_must_allow[1];

	gen_clid(new, nn);
	add_to_unconfirmed(new);
	swap(new, conf);
out_copy:
	exid->clientid.cl_boot = conf->cl_clientid.cl_boot;
	exid->clientid.cl_id = conf->cl_clientid.cl_id;

	exid->seqid = conf->cl_cs_slot.sl_seqid + 1;
	nfsd4_set_ex_flags(conf, exid);

	dprintk("nfsd4_exchange_id seqid %d flags %x\n",
		conf->cl_cs_slot.sl_seqid, conf->cl_exchange_flags);
	status = nfs_ok;

out:
	spin_unlock(&nn->client_lock);
out_nolock:
	if (new)
		expire_client(new);
	if (unconf)
		expire_client(unconf);
	return status;
}

static __be32
check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
{
	dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,
		slot_seqid);

	/* The slot is in use, and no response has been sent. */
	if (slot_inuse) {
		if (seqid == slot_seqid)
			return nfserr_jukebox;
		else
			return nfserr_seq_misordered;
	}
	/* Note unsigned 32-bit arithmetic handles wraparound: */
	if (likely(seqid == slot_seqid + 1))
		return nfs_ok;
	if (seqid == slot_seqid)
		return nfserr_replay_cache;
	return nfserr_seq_misordered;
}
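/*
 * Worked example of the seqid rules above, for a slot whose sl_seqid
 * is 7:
 *
 *	seqid 8, slot idle	-> nfs_ok (new request)
 *	seqid 7, slot idle	-> nfserr_replay_cache (retransmit)
 *	seqid 7, slot in use	-> nfserr_jukebox (original still running)
 *	seqid 9 or 3		-> nfserr_seq_misordered
 *
 * The "seqid == slot_seqid + 1" test is unsigned 32-bit arithmetic, so
 * 0 correctly follows 0xffffffff on wraparound.
 */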
/*
 * Cache the create session result into the create session single DRC
 * slot cache by saving the xdr structure. sl_seqid has been set.
 * Do this for solo or embedded create session operations.
 */
static void
nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
			   struct nfsd4_clid_slot *slot, __be32 nfserr)
{
	slot->sl_status = nfserr;
	memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses));
}

static __be32
nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
			    struct nfsd4_clid_slot *slot)
{
	memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses));
	return slot->sl_status;
}

#define NFSD_MIN_REQ_HDR_SEQ_SZ	((\
			2 * 2 + /* credential,verifier: AUTH_NULL, length 0 */ \
			1 +	/* MIN tag is length with zero, only length */ \
			3 +	/* version, opcount, opcode */ \
			XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
			/* seqid, slotID, slotID, cache */ \
			4 ) * sizeof(__be32))

#define NFSD_MIN_RESP_HDR_SEQ_SZ ((\
			2 +	/* verifier: AUTH_NULL, length 0 */ \
			1 +	/* status */ \
			1 +	/* MIN tag is length with zero, only length */ \
			3 +	/* opcount, opcode, opstatus */ \
			XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
			/* seqid, slotID, slotID, slotID, status */ \
			5 ) * sizeof(__be32))
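/*
 * For reference: with NFS4_MAX_SESSIONID_LEN == 16, XDR_QUADLEN(16) is 4,
 * so the request minimum is (2*2 + 1 + 3 + 4 + 4) * 4 = 64 bytes and the
 * reply minimum is (2 + 1 + 1 + 3 + 4 + 5) * 4 = 64 bytes.
 */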
static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
{
	u32 maxrpc = nn->nfsd_serv->sv_max_mesg;

	if (ca->maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ)
		return nfserr_toosmall;
	if (ca->maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ)
		return nfserr_toosmall;
	ca->headerpadsz = 0;
	ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc);
	ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc);
	ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND);
	ca->maxresp_cached = min_t(u32, ca->maxresp_cached,
			NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ);
	ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION);
	/*
	 * Note decreasing slot size below client's request may make it
	 * difficult for client to function correctly, whereas
	 * decreasing the number of slots will (just?) affect
	 * performance.  When short on memory we therefore prefer to
	 * decrease number of slots instead of their size.  Clients that
	 * request larger slots than they need will get poor results:
	 */
	ca->maxreqs = nfsd4_get_drc_mem(ca);
	if (!ca->maxreqs)
		return nfserr_jukebox;

	return nfs_ok;
}
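/*
 * Illustrative negotiation (hypothetical numbers): a client requesting
 * 1MB slots and 64 slots against a server whose sv_max_mesg is 256KB
 * gets maxreq_sz/maxresp_sz clamped to 256KB, and nfsd4_get_drc_mem()
 * may then grant fewer than 64 slots if reply-cache memory is tight;
 * it never grants more slots than the client asked for.
 */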
/*
 * Server's NFSv4.1 backchannel support is AUTH_SYS-only for now.
 * These are based on similar macros in linux/sunrpc/msg_prot.h .
 */
#define RPC_MAX_HEADER_WITH_AUTH_SYS \
	(RPC_CALLHDRSIZE + 2 * (2 + UNX_CALLSLACK))

#define RPC_MAX_REPHEADER_WITH_AUTH_SYS \
	(RPC_REPHDRSIZE + (2 + NUL_REPLYSLACK))

#define NFSD_CB_MAX_REQ_SZ	((NFS4_enc_cb_recall_sz + \
				 RPC_MAX_HEADER_WITH_AUTH_SYS) * sizeof(__be32))
#define NFSD_CB_MAX_RESP_SZ	((NFS4_dec_cb_recall_sz + \
				 RPC_MAX_REPHEADER_WITH_AUTH_SYS) * \
				 sizeof(__be32))

static __be32 check_backchannel_attrs(struct nfsd4_channel_attrs *ca)
{
	ca->headerpadsz = 0;

	if (ca->maxreq_sz < NFSD_CB_MAX_REQ_SZ)
		return nfserr_toosmall;
	if (ca->maxresp_sz < NFSD_CB_MAX_RESP_SZ)
		return nfserr_toosmall;
	ca->maxresp_cached = 0;
	if (ca->maxops < 2)
		return nfserr_toosmall;

	return nfs_ok;
}

static __be32 nfsd4_check_cb_sec(struct nfsd4_cb_sec *cbs)
{
	switch (cbs->flavor) {
	case RPC_AUTH_NULL:
	case RPC_AUTH_UNIX:
		return nfs_ok;
	default:
		/*
		 * GSS case: the spec doesn't allow us to return this
		 * error.  But it also doesn't allow us not to support
		 * GSS.
		 * I'd rather this fail hard than return some error the
		 * client might think it can already handle:
		 */
		return nfserr_encr_alg_unsupp;
	}
}

__be32
nfsd4_create_session(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
{
	struct nfsd4_create_session *cr_ses = &u->create_session;
	struct sockaddr *sa = svc_addr(rqstp);
	struct nfs4_client *conf, *unconf;
	struct nfs4_client *old = NULL;
	struct nfsd4_session *new;
	struct nfsd4_conn *conn;
	struct nfsd4_clid_slot *cs_slot = NULL;
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	if (cr_ses->flags & ~SESSION4_FLAG_MASK_A)
		return nfserr_inval;
	status = nfsd4_check_cb_sec(&cr_ses->cb_sec);
	if (status)
		return status;
	status = check_forechannel_attrs(&cr_ses->fore_channel, nn);
	if (status)
		return status;
	status = check_backchannel_attrs(&cr_ses->back_channel);
	if (status)
		goto out_release_drc_mem;
	status = nfserr_jukebox;
	new = alloc_session(&cr_ses->fore_channel, &cr_ses->back_channel);
	if (!new)
		goto out_release_drc_mem;
	conn = alloc_conn_from_crses(rqstp, cr_ses);
	if (!conn)
		goto out_free_session;

	spin_lock(&nn->client_lock);
	unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn);
	conf = find_confirmed_client(&cr_ses->clientid, true, nn);
	WARN_ON_ONCE(conf && unconf);

	if (conf) {
		status = nfserr_wrong_cred;
		if (!nfsd4_mach_creds_match(conf, rqstp))
			goto out_free_conn;
		cs_slot = &conf->cl_cs_slot;
		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
		if (status) {
			if (status == nfserr_replay_cache)
				status = nfsd4_replay_create_session(cr_ses, cs_slot);
			goto out_free_conn;
		}
	} else if (unconf) {
		if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
		    !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) {
			status = nfserr_clid_inuse;
			goto out_free_conn;
		}
		status = nfserr_wrong_cred;
		if (!nfsd4_mach_creds_match(unconf, rqstp))
			goto out_free_conn;
		cs_slot = &unconf->cl_cs_slot;
		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
		if (status) {
			/* an unconfirmed replay returns misordered */
			status = nfserr_seq_misordered;
			goto out_free_conn;
		}
		old = find_confirmed_client_by_name(&unconf->cl_name, nn);
		if (old) {
			status = mark_client_expired_locked(old);
			if (status) {
				old = NULL;
				goto out_free_conn;
			}
		}
		move_to_confirmed(unconf);
		conf = unconf;
	} else {
		status = nfserr_stale_clientid;
		goto out_free_conn;
	}
	status = nfs_ok;
	/* Persistent sessions are not supported */
	cr_ses->flags &= ~SESSION4_PERSIST;
	/* Upshifting from TCP to RDMA is not supported */
	cr_ses->flags &= ~SESSION4_RDMA;

	init_session(rqstp, new, conf, cr_ses);
	nfsd4_get_session_locked(new);

	memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
	       NFS4_MAX_SESSIONID_LEN);
	cs_slot->sl_seqid++;
	cr_ses->seqid = cs_slot->sl_seqid;

	/* cache solo and embedded create sessions under the client_lock */
	nfsd4_cache_create_session(cr_ses, cs_slot, status);
	spin_unlock(&nn->client_lock);
	/* init connection and backchannel */
	nfsd4_init_conn(rqstp, conn, new);
	nfsd4_put_session(new);
	if (old)
		expire_client(old);
	return status;
out_free_conn:
	spin_unlock(&nn->client_lock);
	free_conn(conn);
	if (old)
		expire_client(old);
out_free_session:
	__free_session(new);
out_release_drc_mem:
	nfsd4_put_drc_mem(&cr_ses->fore_channel);
	return status;
}

static __be32 nfsd4_map_bcts_dir(u32 *dir)
{
	switch (*dir) {
	case NFS4_CDFC4_FORE:
	case NFS4_CDFC4_BACK:
		return nfs_ok;
	case NFS4_CDFC4_FORE_OR_BOTH:
	case NFS4_CDFC4_BACK_OR_BOTH:
		*dir = NFS4_CDFC4_BOTH;
		return nfs_ok;
	};
	return nfserr_inval;
}
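/*
 * Net effect of the mapping above: FORE and BACK are accepted as-is,
 * FORE_OR_BOTH and BACK_OR_BOTH are upgraded to BOTH (the server is
 * always willing to use a connection in both directions), and any
 * other value is rejected with NFS4ERR_INVAL.
 */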
__be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	struct nfsd4_backchannel_ctl *bc = &u->backchannel_ctl;
	struct nfsd4_session *session = cstate->session;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
	__be32 status;

	status = nfsd4_check_cb_sec(&bc->bc_cb_sec);
	if (status)
		return status;
	spin_lock(&nn->client_lock);
	session->se_cb_prog = bc->bc_cb_program;
	session->se_cb_sec = bc->bc_cb_sec;
	spin_unlock(&nn->client_lock);

	nfsd4_probe_callback(session->se_client);

	return nfs_ok;
}

__be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	struct nfsd4_bind_conn_to_session *bcts = &u->bind_conn_to_session;
	__be32 status;
	struct nfsd4_conn *conn;
	struct nfsd4_session *session;
	struct net *net = SVC_NET(rqstp);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	if (!nfsd4_last_compound_op(rqstp))
		return nfserr_not_only_op;
	spin_lock(&nn->client_lock);
	session = find_in_sessionid_hashtbl(&bcts->sessionid, net, &status);
	spin_unlock(&nn->client_lock);
	if (!session)
		goto out_no_session;
	status = nfserr_wrong_cred;
	if (!nfsd4_mach_creds_match(session->se_client, rqstp))
		goto out;
	status = nfsd4_map_bcts_dir(&bcts->dir);
	if (status)
		goto out;
	conn = alloc_conn(rqstp, bcts->dir);
	status = nfserr_jukebox;
	if (!conn)
		goto out;
	nfsd4_init_conn(rqstp, conn, session);
	status = nfs_ok;
out:
	nfsd4_put_session(session);
out_no_session:
	return status;
}

static bool nfsd4_compound_in_session(struct nfsd4_compound_state *cstate, struct nfs4_sessionid *sid)
{
	if (!cstate->session)
		return false;
	return !memcmp(sid, &cstate->session->se_sessionid, sizeof(*sid));
}

__be32
nfsd4_destroy_session(struct svc_rqst *r, struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	struct nfs4_sessionid *sessionid = &u->destroy_session.sessionid;
	struct nfsd4_session *ses;
	__be32 status;
	int ref_held_by_me = 0;
	struct net *net = SVC_NET(r);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	status = nfserr_not_only_op;
	if (nfsd4_compound_in_session(cstate, sessionid)) {
		if (!nfsd4_last_compound_op(r))
			goto out;
		ref_held_by_me++;
	}
	dump_sessionid(__func__, sessionid);
	spin_lock(&nn->client_lock);
	ses = find_in_sessionid_hashtbl(sessionid, net, &status);
	if (!ses)
		goto out_client_lock;
	status = nfserr_wrong_cred;
	if (!nfsd4_mach_creds_match(ses->se_client, r))
		goto out_put_session;
	status = mark_session_dead_locked(ses, 1 + ref_held_by_me);
	if (status)
		goto out_put_session;
	unhash_session(ses);
	spin_unlock(&nn->client_lock);

	nfsd4_probe_callback_sync(ses->se_client);

	spin_lock(&nn->client_lock);
	status = nfs_ok;
out_put_session:
	nfsd4_put_session_locked(ses);
out_client_lock:
	spin_unlock(&nn->client_lock);
out:
	return status;
}

static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
{
	struct nfsd4_conn *c;

	list_for_each_entry(c, &s->se_conns, cn_persession) {
		if (c->cn_xprt == xpt) {
			return c;
		}
	}
	return NULL;
}

static __be32 nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd4_conn *c;
	__be32 status = nfs_ok;
	int ret;

	spin_lock(&clp->cl_lock);
	c = __nfsd4_find_conn(new->cn_xprt, ses);
	if (c)
		goto out_free;
	status = nfserr_conn_not_bound_to_session;
	if (clp->cl_mach_cred)
		goto out_free;
	__nfsd4_hash_conn(new, ses);
	spin_unlock(&clp->cl_lock);
	ret = nfsd4_register_conn(new);
	if (ret)
		/* oops; xprt is already down: */
		nfsd4_conn_lost(&new->cn_xpt_user);
	return nfs_ok;
out_free:
	spin_unlock(&clp->cl_lock);
	free_conn(new);
	return status;
}

static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session)
{
	struct nfsd4_compoundargs *args = rqstp->rq_argp;

	return args->opcnt > session->se_fchannel.maxops;
}

static bool nfsd4_request_too_big(struct svc_rqst *rqstp,
				  struct nfsd4_session *session)
{
	struct xdr_buf *xb = &rqstp->rq_arg;

	return xb->len > session->se_fchannel.maxreq_sz;
}

static bool replay_matches_cache(struct svc_rqst *rqstp,
		 struct nfsd4_sequence *seq, struct nfsd4_slot *slot)
{
	struct nfsd4_compoundargs *argp = rqstp->rq_argp;

	if ((bool)(slot->sl_flags & NFSD4_SLOT_CACHETHIS) !=
	    (bool)seq->cachethis)
		return false;
	/*
	 * If there's an error then the reply can have fewer ops than
	 * the call.  But if we cached a reply with *more* ops than the
	 * call you're sending us now, then this new call is clearly not
	 * really a replay of the old one:
	 */
	if (slot->sl_opcnt < argp->opcnt)
		return false;
	/* This is the only check explicitly called by spec: */
	if (!same_creds(&rqstp->rq_cred, &slot->sl_cred))
		return false;
	/*
	 * There may be more comparisons we could actually do, but the
	 * spec doesn't require us to catch every case where the calls
	 * don't match (that would require caching the call as well as
	 * the reply), so we don't bother.
	 */
	return true;
}
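/*
 * Example of a false retry these checks catch (hypothetical scenario):
 * two processes share a mount, and a request from user B lands on a
 * slot/seqid that still caches user A's reply, e.g. after a client bug.
 * The credential comparison fails, so SEQUENCE returns
 * nfserr_seq_false_retry rather than handing A's cached reply to B.
 */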
__be32
nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	struct nfsd4_sequence *seq = &u->sequence;
	struct nfsd4_compoundres *resp = rqstp->rq_resp;
	struct xdr_stream *xdr = &resp->xdr;
	struct nfsd4_session *session;
	struct nfs4_client *clp;
	struct nfsd4_slot *slot;
	struct nfsd4_conn *conn;
	__be32 status;
	int buflen;
	struct net *net = SVC_NET(rqstp);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	if (resp->opcnt != 1)
		return nfserr_sequence_pos;

	/*
	 * Will be either used or freed by nfsd4_sequence_check_conn
	 * below.
	 */
	conn = alloc_conn(rqstp, NFS4_CDFC4_FORE);
	if (!conn)
		return nfserr_jukebox;

	spin_lock(&nn->client_lock);
	session = find_in_sessionid_hashtbl(&seq->sessionid, net, &status);
	if (!session)
		goto out_no_session;
	clp = session->se_client;

	status = nfserr_too_many_ops;
	if (nfsd4_session_too_many_ops(rqstp, session))
		goto out_put_session;

	status = nfserr_req_too_big;
	if (nfsd4_request_too_big(rqstp, session))
		goto out_put_session;

	status = nfserr_badslot;
	if (seq->slotid >= session->se_fchannel.maxreqs)
		goto out_put_session;

	slot = session->se_slots[seq->slotid];
	dprintk("%s: slotid %d\n", __func__, seq->slotid);

	/* We do not negotiate the number of slots yet, so set the
	 * maxslots to the session maxreqs which is used to encode
	 * sr_highest_slotid and the sr_target_slot id to maxslots */
	seq->maxslots = session->se_fchannel.maxreqs;

	status = check_slot_seqid(seq->seqid, slot->sl_seqid,
					slot->sl_flags & NFSD4_SLOT_INUSE);
	if (status == nfserr_replay_cache) {
		status = nfserr_seq_misordered;
		if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED))
			goto out_put_session;
		status = nfserr_seq_false_retry;
		if (!replay_matches_cache(rqstp, seq, slot))
			goto out_put_session;
		cstate->slot = slot;
		cstate->session = session;
		cstate->clp = clp;
		/* Return the cached reply status and set cstate->status
		 * for nfsd4_proc_compound processing */
		status = nfsd4_replay_cache_entry(resp, seq);
		cstate->status = nfserr_replay_cache;
		goto out;
	}
	if (status)
		goto out_put_session;

	status = nfsd4_sequence_check_conn(conn, session);
	conn = NULL;
	if (status)
		goto out_put_session;

	buflen = (seq->cachethis) ?
			session->se_fchannel.maxresp_cached :
			session->se_fchannel.maxresp_sz;
	status = (seq->cachethis) ? nfserr_rep_too_big_to_cache :
				    nfserr_rep_too_big;
	if (xdr_restrict_buflen(xdr, buflen - rqstp->rq_auth_slack))
		goto out_put_session;
	svc_reserve(rqstp, buflen);

	status = nfs_ok;
	/* Success! bump slot seqid */
	slot->sl_seqid = seq->seqid;
	slot->sl_flags |= NFSD4_SLOT_INUSE;
	if (seq->cachethis)
		slot->sl_flags |= NFSD4_SLOT_CACHETHIS;
	else
		slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS;

	cstate->slot = slot;
	cstate->session = session;
	cstate->clp = clp;

out:
	switch (clp->cl_cb_state) {
	case NFSD4_CB_DOWN:
		seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN;
		break;
	case NFSD4_CB_FAULT:
		seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT;
		break;
	default:
		seq->status_flags = 0;
	}
	if (!list_empty(&clp->cl_revoked))
		seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED;
out_no_session:
	if (conn)
		free_conn(conn);
	spin_unlock(&nn->client_lock);
	return status;
out_put_session:
	nfsd4_put_session_locked(session);
	goto out_no_session;
}

void
nfsd4_sequence_done(struct nfsd4_compoundres *resp)
{
	struct nfsd4_compound_state *cs = &resp->cstate;

	if (nfsd4_has_session(cs)) {
		if (cs->status != nfserr_replay_cache) {
			nfsd4_store_cache_entry(resp);
			cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE;
		}
		/* Drop session reference that was taken in nfsd4_sequence() */
		nfsd4_put_session(cs->session);
	} else if (cs->clp)
		put_client_renew(cs->clp);
}
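/*
 * Lifecycle note: nfsd4_sequence() marks the slot NFSD4_SLOT_INUSE and
 * takes a session reference; nfsd4_sequence_done() is its counterpart,
 * caching the reply, clearing INUSE and dropping that reference once the
 * compound has been processed. A replay served from the cache skips the
 * store, since the slot already holds the original reply.
 */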
__be32
nfsd4_destroy_clientid(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	struct nfsd4_destroy_clientid *dc = &u->destroy_clientid;
	struct nfs4_client *conf, *unconf;
	struct nfs4_client *clp = NULL;
	__be32 status = 0;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	spin_lock(&nn->client_lock);
	unconf = find_unconfirmed_client(&dc->clientid, true, nn);
	conf = find_confirmed_client(&dc->clientid, true, nn);
	WARN_ON_ONCE(conf && unconf);

	if (conf) {
		if (client_has_state(conf)) {
			status = nfserr_clientid_busy;
			goto out;
		}
		status = mark_client_expired_locked(conf);
		if (status)
			goto out;
		clp = conf;
	} else if (unconf)
		clp = unconf;
	else {
		status = nfserr_stale_clientid;
		goto out;
	}
	if (!nfsd4_mach_creds_match(clp, rqstp)) {
		clp = NULL;
		status = nfserr_wrong_cred;
		goto out;
	}
	unhash_client_locked(clp);
out:
	spin_unlock(&nn->client_lock);
	if (clp)
		expire_client(clp);
	return status;
}

__be32
nfsd4_reclaim_complete(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
{
	struct nfsd4_reclaim_complete *rc = &u->reclaim_complete;
	__be32 status = 0;

	if (rc->rca_one_fs) {
		if (!cstate->current_fh.fh_dentry)
			return nfserr_nofilehandle;
		/*
		 * We don't take advantage of the rca_one_fs case.
		 * That's OK, it's optional, we can safely ignore it.
		 */
		return nfs_ok;
	}

	status = nfserr_complete_already;
	if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE,
			     &cstate->session->se_client->cl_flags))
		goto out;

	status = nfserr_stale_clientid;
	if (is_client_expired(cstate->session->se_client))
		/*
		 * The following error isn't really legal.
		 * But we only get here if the client just explicitly
		 * destroyed the client record.  Surely it no longer
		 * cares what error it gets back on an operation for
		 * the dead client.
		 */
		goto out;

	status = nfs_ok;
	nfsd4_client_record_create(cstate->session->se_client);
out:
	return status;
}

__be32
nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		  union nfsd4_op_u *u)
{
	struct nfsd4_setclientid *setclid = &u->setclientid;
	struct xdr_netobj clname = setclid->se_name;
	nfs4_verifier clverifier = setclid->se_verf;
	struct nfs4_client *conf, *new;
	struct nfs4_client *unconf = NULL;
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	new = create_client(clname, rqstp, &clverifier);
	if (new == NULL)
		return nfserr_jukebox;
	/* Cases below refer to rfc 3530 section 14.2.33: */
	spin_lock(&nn->client_lock);
	conf = find_confirmed_client_by_name(&clname, nn);
	if (conf && client_has_state(conf)) {
		/* case 0: */
		status = nfserr_clid_inuse;
		if (clp_used_exchangeid(conf))
			goto out;
		if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
			char addr_str[INET6_ADDRSTRLEN];
			rpc_ntop((struct sockaddr *) &conf->cl_addr, addr_str,
				 sizeof(addr_str));
			dprintk("NFSD: setclientid: string in use by client "
				"at %s\n", addr_str);
			goto out;
		}
	}
	unconf = find_unconfirmed_client_by_name(&clname, nn);
	if (unconf)
		unhash_client_locked(unconf);
	if (conf && same_verf(&conf->cl_verifier, &clverifier)) {
		/* case 1: probable callback update */
		copy_clid(new, conf);
		gen_confirm(new, nn);
	} else /* case 4 (new client) or cases 2, 3 (client reboot): */
		gen_clid(new, nn);
	new->cl_minorversion = 0;
	gen_callback(new, setclid, rqstp);
	add_to_unconfirmed(new);
	setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
	setclid->se_clientid.cl_id = new->cl_clientid.cl_id;
	memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data));
	new = NULL;
	status = nfs_ok;
out:
	spin_unlock(&nn->client_lock);
	if (new)
		free_client(new);
	if (unconf)
		expire_client(unconf);
	return status;
}

__be32
nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
			struct nfsd4_compound_state *cstate,
			union nfsd4_op_u *u)
{
	struct nfsd4_setclientid_confirm *setclientid_confirm =
			&u->setclientid_confirm;
	struct nfs4_client *conf, *unconf;
	struct nfs4_client *old = NULL;
	nfs4_verifier confirm = setclientid_confirm->sc_confirm;
	clientid_t *clid = &setclientid_confirm->sc_clientid;
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	if (STALE_CLIENTID(clid, nn))
		return nfserr_stale_clientid;

	spin_lock(&nn->client_lock);
	conf = find_confirmed_client(clid, false, nn);
	unconf = find_unconfirmed_client(clid, false, nn);
	/*
	 * We try hard to give out unique clientid's, so if we get an
	 * attempt to confirm the same clientid with a different cred,
	 * the client may be buggy; this should never happen.
	 *
	 * Nevertheless, RFC 7530 recommends INUSE for this case:
	 */
	status = nfserr_clid_inuse;
	if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred))
		goto out;
	if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred))
		goto out;
	/* cases below refer to rfc 3530 section 14.2.34: */
	if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) {
		if (conf && same_verf(&confirm, &conf->cl_confirm)) {
			/* case 2: probable retransmit */
			status = nfs_ok;
		} else /* case 4: client hasn't noticed we rebooted yet? */
			status = nfserr_stale_clientid;
		goto out;
	}
	status = nfs_ok;
	if (conf) {	/* case 1: callback update */
		old = unconf;
		unhash_client_locked(old);
		nfsd4_change_callback(conf, &unconf->cl_cb_conn);
	} else { /* case 3: normal case; new or rebooted client */
		old = find_confirmed_client_by_name(&unconf->cl_name, nn);
		if (old) {
			status = nfserr_clid_inuse;
			if (client_has_state(old)
					&& !same_creds(&unconf->cl_cred,
							&old->cl_cred))
				goto out;
			status = mark_client_expired_locked(old);
			if (status) {
				old = NULL;
				goto out;
			}
		}
		move_to_confirmed(unconf);
		conf = unconf;
	}
	get_client_locked(conf);
	spin_unlock(&nn->client_lock);
	nfsd4_probe_callback(conf);
	spin_lock(&nn->client_lock);
	put_client_renew_locked(conf);
out:
	spin_unlock(&nn->client_lock);
	if (old)
		expire_client(old);
	return status;
}
static struct nfs4_file *nfsd4_alloc_file(void)
{
	return kmem_cache_alloc(file_slab, GFP_KERNEL);
}

/* OPEN Share state helper functions */
static void nfsd4_init_file(struct knfsd_fh *fh, unsigned int hashval,
				struct nfs4_file *fp)
{
	lockdep_assert_held(&state_lock);

	refcount_set(&fp->fi_ref, 1);
	spin_lock_init(&fp->fi_lock);
	INIT_LIST_HEAD(&fp->fi_stateids);
	INIT_LIST_HEAD(&fp->fi_delegations);
	INIT_LIST_HEAD(&fp->fi_clnt_odstate);
	fh_copy_shallow(&fp->fi_fhandle, fh);
	fp->fi_deleg_file = NULL;
	fp->fi_had_conflict = false;
	fp->fi_share_deny = 0;
	memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
	memset(fp->fi_access, 0, sizeof(fp->fi_access));
#ifdef CONFIG_NFSD_PNFS
	INIT_LIST_HEAD(&fp->fi_lo_states);
	atomic_set(&fp->fi_lo_recalls, 0);
#endif
	hlist_add_head_rcu(&fp->fi_hash, &file_hashtbl[hashval]);
}

void
nfsd4_free_slabs(void)
{
	kmem_cache_destroy(client_slab);
	kmem_cache_destroy(openowner_slab);
	kmem_cache_destroy(lockowner_slab);
	kmem_cache_destroy(file_slab);
	kmem_cache_destroy(stateid_slab);
	kmem_cache_destroy(deleg_slab);
	kmem_cache_destroy(odstate_slab);
}

int
nfsd4_init_slabs(void)
{
	client_slab = kmem_cache_create("nfsd4_clients",
			sizeof(struct nfs4_client), 0, 0, NULL);
	if (client_slab == NULL)
		goto out;
	openowner_slab = kmem_cache_create("nfsd4_openowners",
			sizeof(struct nfs4_openowner), 0, 0, NULL);
	if (openowner_slab == NULL)
		goto out_free_client_slab;
	lockowner_slab = kmem_cache_create("nfsd4_lockowners",
			sizeof(struct nfs4_lockowner), 0, 0, NULL);
	if (lockowner_slab == NULL)
		goto out_free_openowner_slab;
	file_slab = kmem_cache_create("nfsd4_files",
			sizeof(struct nfs4_file), 0, 0, NULL);
	if (file_slab == NULL)
		goto out_free_lockowner_slab;
	stateid_slab = kmem_cache_create("nfsd4_stateids",
			sizeof(struct nfs4_ol_stateid), 0, 0, NULL);
	if (stateid_slab == NULL)
		goto out_free_file_slab;
	deleg_slab = kmem_cache_create("nfsd4_delegations",
			sizeof(struct nfs4_delegation), 0, 0, NULL);
	if (deleg_slab == NULL)
		goto out_free_stateid_slab;
	odstate_slab = kmem_cache_create("nfsd4_odstate",
			sizeof(struct nfs4_clnt_odstate), 0, 0, NULL);
	if (odstate_slab == NULL)
		goto out_free_deleg_slab;
	return 0;

out_free_deleg_slab:
	kmem_cache_destroy(deleg_slab);
out_free_stateid_slab:
	kmem_cache_destroy(stateid_slab);
out_free_file_slab:
	kmem_cache_destroy(file_slab);
out_free_lockowner_slab:
	kmem_cache_destroy(lockowner_slab);
out_free_openowner_slab:
	kmem_cache_destroy(openowner_slab);
out_free_client_slab:
	kmem_cache_destroy(client_slab);
out:
	dprintk("nfsd4: out of memory while initializing nfsv4\n");
	return -ENOMEM;
}
static void init_nfs4_replay(struct nfs4_replay *rp)
{
	rp->rp_status = nfserr_serverfault;
	rp->rp_buflen = 0;
	rp->rp_buf = rp->rp_ibuf;
	mutex_init(&rp->rp_mutex);
}

static void nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate,
		struct nfs4_stateowner *so)
{
	if (!nfsd4_has_session(cstate)) {
		mutex_lock(&so->so_replay.rp_mutex);
		cstate->replay_owner = nfs4_get_stateowner(so);
	}
}

void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate)
{
	struct nfs4_stateowner *so = cstate->replay_owner;

	if (so != NULL) {
		cstate->replay_owner = NULL;
		mutex_unlock(&so->so_replay.rp_mutex);
		nfs4_put_stateowner(so);
	}
}

static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp)
{
	struct nfs4_stateowner *sop;

	sop = kmem_cache_alloc(slab, GFP_KERNEL);
	if (!sop)
		return NULL;

	sop->so_owner.data = kmemdup(owner->data, owner->len, GFP_KERNEL);
	if (!sop->so_owner.data) {
		kmem_cache_free(slab, sop);
		return NULL;
	}
	sop->so_owner.len = owner->len;

	INIT_LIST_HEAD(&sop->so_stateids);
	sop->so_client = clp;
	init_nfs4_replay(&sop->so_replay);
	atomic_set(&sop->so_count, 1);
	return sop;
}
static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval)
{
	lockdep_assert_held(&clp->cl_lock);

	list_add(&oo->oo_owner.so_strhash,
		 &clp->cl_ownerstr_hashtbl[strhashval]);
	list_add(&oo->oo_perclient, &clp->cl_openowners);
}

static void nfs4_unhash_openowner(struct nfs4_stateowner *so)
{
	unhash_openowner_locked(openowner(so));
}

static void nfs4_free_openowner(struct nfs4_stateowner *so)
{
	struct nfs4_openowner *oo = openowner(so);

	kmem_cache_free(openowner_slab, oo);
}

static const struct nfs4_stateowner_operations openowner_ops = {
	.so_unhash =	nfs4_unhash_openowner,
	.so_free =	nfs4_free_openowner,
};

static struct nfs4_ol_stateid *
nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
{
	struct nfs4_ol_stateid *local, *ret = NULL;
	struct nfs4_openowner *oo = open->op_openowner;

	lockdep_assert_held(&fp->fi_lock);

	list_for_each_entry(local, &fp->fi_stateids, st_perfile) {
		/* ignore lock owners */
		if (local->st_stateowner->so_is_open_owner == 0)
			continue;
		if (local->st_stateowner != &oo->oo_owner)
			continue;
		if (local->st_stid.sc_type == NFS4_OPEN_STID) {
			ret = local;
			refcount_inc(&ret->st_stid.sc_count);
			break;
		}
	}
	return ret;
}

static __be32
nfsd4_verify_open_stid(struct nfs4_stid *s)
{
	__be32 ret = nfs_ok;

	switch (s->sc_type) {
	default:
		break;
	case 0:
	case NFS4_CLOSED_STID:
	case NFS4_CLOSED_DELEG_STID:
		ret = nfserr_bad_stateid;
		break;
	case NFS4_REVOKED_DELEG_STID:
		ret = nfserr_deleg_revoked;
	}
	return ret;
}

/* Lock the stateid st_mutex, and deal with races with CLOSE */
static __be32
nfsd4_lock_ol_stateid(struct nfs4_ol_stateid *stp)
{
	__be32 ret;

	mutex_lock_nested(&stp->st_mutex, LOCK_STATEID_MUTEX);
	ret = nfsd4_verify_open_stid(&stp->st_stid);
	if (ret != nfs_ok)
		mutex_unlock(&stp->st_mutex);
	return ret;
}

static struct nfs4_ol_stateid *
nfsd4_find_and_lock_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
{
	struct nfs4_ol_stateid *stp;

	do {
		spin_lock(&fp->fi_lock);
		stp = nfsd4_find_existing_open(fp, open);
		spin_unlock(&fp->fi_lock);
		if (!stp || nfsd4_lock_ol_stateid(stp) == nfs_ok)
			break;
		nfs4_put_stid(&stp->st_stid);
	} while (1);
	return stp;
}
static struct nfs4_openowner *
alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
			   struct nfsd4_compound_state *cstate)
{
	struct nfs4_client *clp = cstate->clp;
	struct nfs4_openowner *oo, *ret;

	oo = alloc_stateowner(openowner_slab, &open->op_owner, clp);
	if (!oo)
		return NULL;
	oo->oo_owner.so_ops = &openowner_ops;
	oo->oo_owner.so_is_open_owner = 1;
	oo->oo_owner.so_seqid = open->op_seqid;
	oo->oo_flags = 0;
	if (nfsd4_has_session(cstate))
		oo->oo_flags |= NFS4_OO_CONFIRMED;
	oo->oo_time = 0;
	oo->oo_last_closed_stid = NULL;
	INIT_LIST_HEAD(&oo->oo_close_lru);
	spin_lock(&clp->cl_lock);
	ret = find_openstateowner_str_locked(strhashval, open, clp);
	if (ret == NULL) {
		hash_openowner(oo, clp, strhashval);
		ret = oo;
	} else
		nfs4_free_stateowner(&oo->oo_owner);

	spin_unlock(&clp->cl_lock);
	return ret;
}

static struct nfs4_ol_stateid *
init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open)
{

	struct nfs4_openowner *oo = open->op_openowner;
	struct nfs4_ol_stateid *retstp = NULL;
	struct nfs4_ol_stateid *stp;

	stp = open->op_stp;
	/* We are moving these outside of the spinlocks to avoid the warnings */
	mutex_init(&stp->st_mutex);
	mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);

retry:
	spin_lock(&oo->oo_owner.so_client->cl_lock);
	spin_lock(&fp->fi_lock);

	retstp = nfsd4_find_existing_open(fp, open);
	if (retstp)
		goto out_unlock;

	open->op_stp = NULL;
	refcount_inc(&stp->st_stid.sc_count);
	stp->st_stid.sc_type = NFS4_OPEN_STID;
	INIT_LIST_HEAD(&stp->st_locks);
	stp->st_stateowner = nfs4_get_stateowner(&oo->oo_owner);
	get_nfs4_file(fp);
	stp->st_stid.sc_file = fp;
	stp->st_access_bmap = 0;
	stp->st_deny_bmap = 0;
	stp->st_openstp = NULL;
	list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
	list_add(&stp->st_perfile, &fp->fi_stateids);

out_unlock:
	spin_unlock(&fp->fi_lock);
	spin_unlock(&oo->oo_owner.so_client->cl_lock);
	if (retstp) {
		/* Handle races with CLOSE */
		if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
			nfs4_put_stid(&retstp->st_stid);
			goto retry;
		}
		/* To keep mutex tracking happy */
		mutex_unlock(&stp->st_mutex);
		stp = retstp;
	}
	return stp;
}
/*
 * In the 4.0 case we need to keep the owners around a little while to handle
 * CLOSE replay. We still do need to release any file access that is held by
 * them before returning however.
 */
static void
move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net)
{
	struct nfs4_ol_stateid *last;
	struct nfs4_openowner *oo = openowner(s->st_stateowner);
	struct nfsd_net *nn = net_generic(s->st_stid.sc_client->net,
						nfsd_net_id);

	dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo);

	/*
	 * We know that we hold one reference via nfsd4_close, and another
	 * "persistent" reference for the client. If the refcount is higher
	 * than 2, then there are still calls in progress that are using this
	 * stateid. We can't put the sc_file reference until they are finished.
	 * Wait for the refcount to drop to 2. Since it has been unhashed,
	 * there should be no danger of the refcount going back up again at
	 * this point.
	 */
	wait_event(close_wq, refcount_read(&s->st_stid.sc_count) == 2);

	release_all_access(s);
	if (s->st_stid.sc_file) {
		put_nfs4_file(s->st_stid.sc_file);
		s->st_stid.sc_file = NULL;
	}

	spin_lock(&nn->client_lock);
	last = oo->oo_last_closed_stid;
	oo->oo_last_closed_stid = s;
	list_move_tail(&oo->oo_close_lru, &nn->close_lru);
	oo->oo_time = get_seconds();
	spin_unlock(&nn->client_lock);
	if (last)
		nfs4_put_stid(&last->st_stid);
}
/* search file_hashtbl[] for file */
static struct nfs4_file *
find_file_locked(struct knfsd_fh *fh, unsigned int hashval)
{
	struct nfs4_file *fp;

	hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash) {
		if (fh_match(&fp->fi_fhandle, fh)) {
			if (refcount_inc_not_zero(&fp->fi_ref))
				return fp;
		}
	}
	return NULL;
}

static struct nfs4_file *
find_file(struct knfsd_fh *fh)
{
	struct nfs4_file *fp;
	unsigned int hashval = file_hashval(fh);

	rcu_read_lock();
	fp = find_file_locked(fh, hashval);
	rcu_read_unlock();
	return fp;
}

static struct nfs4_file *
find_or_add_file(struct nfs4_file *new, struct knfsd_fh *fh)
{
	struct nfs4_file *fp;
	unsigned int hashval = file_hashval(fh);

	rcu_read_lock();
	fp = find_file_locked(fh, hashval);
	rcu_read_unlock();
	if (fp)
		return fp;

	spin_lock(&state_lock);
	fp = find_file_locked(fh, hashval);
	if (likely(fp == NULL)) {
		nfsd4_init_file(fh, hashval, new);
		fp = new;
	}
	spin_unlock(&state_lock);

	return fp;
}

/*
 * Called to check deny when READ with all zero stateid or
 * WRITE with all zero or all one stateid
 */
static __be32
nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
{
	struct nfs4_file *fp;
	__be32 ret = nfs_ok;

	fp = find_file(&current_fh->fh_handle);
	if (!fp)
		return ret;
	/* Check for conflicting share reservations */
	spin_lock(&fp->fi_lock);
	if (fp->fi_share_deny & deny_type)
		ret = nfserr_locked;
	spin_unlock(&fp->fi_lock);
	put_nfs4_file(fp);
	return ret;
}
static void nfsd4_cb_recall_prepare(struct nfsd4_callback *cb)
{
	struct nfs4_delegation *dp = cb_to_delegation(cb);
	struct nfsd_net *nn = net_generic(dp->dl_stid.sc_client->net,
					  nfsd_net_id);

	block_delegations(&dp->dl_stid.sc_file->fi_fhandle);

	/*
	 * We can't do this in nfsd_break_deleg_cb because it is
	 * already holding inode->i_lock.
	 *
	 * If the dl_time != 0, then we know that it has already been
	 * queued for a lease break. Don't queue it again.
	 */
	spin_lock(&state_lock);
	if (dp->dl_time == 0) {
		dp->dl_time = get_seconds();
		list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);
	}
	spin_unlock(&state_lock);
}

static int nfsd4_cb_recall_done(struct nfsd4_callback *cb,
		struct rpc_task *task)
{
	struct nfs4_delegation *dp = cb_to_delegation(cb);

	if (dp->dl_stid.sc_type == NFS4_CLOSED_DELEG_STID)
		return 1;

	switch (task->tk_status) {
	case 0:
		return 1;
	case -EBADHANDLE:
	case -NFS4ERR_BAD_STATEID:
		/*
		 * Race: client probably got cb_recall before open reply
		 * granting delegation.
		 */
		if (dp->dl_retries--) {
			rpc_delay(task, 2 * HZ);
			return 0;
		}
		/*FALLTHRU*/
	default:
		return -1;
	}
}

static void nfsd4_cb_recall_release(struct nfsd4_callback *cb)
{
	struct nfs4_delegation *dp = cb_to_delegation(cb);

	nfs4_put_stid(&dp->dl_stid);
}

static const struct nfsd4_callback_ops nfsd4_cb_recall_ops = {
	.prepare	= nfsd4_cb_recall_prepare,
	.done		= nfsd4_cb_recall_done,
	.release	= nfsd4_cb_recall_release,
};
static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
{
	/*
	 * We're assuming the state code never drops its reference
	 * without first removing the lease.  Since we're in this lease
	 * callback (and since the lease code is serialized by the
	 * i_lock) we know the server hasn't removed the lease yet, and
	 * we know it's safe to take a reference.
	 */
	refcount_inc(&dp->dl_stid.sc_count);
	nfsd4_run_cb(&dp->dl_recall);
}

/* Called from break_lease() with i_lock held. */
static bool
nfsd_break_deleg_cb(struct file_lock *fl)
{
	bool ret = false;
	struct nfs4_delegation *dp = (struct nfs4_delegation *)fl->fl_owner;
	struct nfs4_file *fp = dp->dl_stid.sc_file;

	/*
	 * We don't want the locks code to timeout the lease for us;
	 * we'll remove it ourself if a delegation isn't returned
	 * in time:
	 */
	fl->fl_break_time = 0;

	spin_lock(&fp->fi_lock);
	fp->fi_had_conflict = true;
	nfsd_break_one_deleg(dp);
	spin_unlock(&fp->fi_lock);
	return ret;
}

static int
nfsd_change_deleg_cb(struct file_lock *onlist, int arg,
		     struct list_head *dispose)
{
	if (arg & F_UNLCK)
		return lease_modify(onlist, arg, dispose);
	else
		return -EAGAIN;
}

static const struct lock_manager_operations nfsd_lease_mng_ops = {
	.lm_break = nfsd_break_deleg_cb,
	.lm_change = nfsd_change_deleg_cb,
};

static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid)
{
	if (nfsd4_has_session(cstate))
		return nfs_ok;
	if (seqid == so->so_seqid - 1)
		return nfserr_replay_me;
	if (seqid == so->so_seqid)
		return nfs_ok;
	return nfserr_bad_seqid;
}
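/*
 * Worked example of the v4.0 seqid rules above, for an owner whose
 * so_seqid is 4: a request carrying seqid 4 is new and OK, seqid 3 is
 * a retransmit answered from the owner's replay cache
 * (nfserr_replay_me), and anything else is nfserr_bad_seqid. Sessions
 * (v4.1+) make all of this moot, hence the early nfs_ok.
 */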
static __be32 lookup_clientid(clientid_t *clid,
		struct nfsd4_compound_state *cstate,
		struct nfsd_net *nn)
{
	struct nfs4_client *found;

	if (cstate->clp) {
		found = cstate->clp;
		if (!same_clid(&found->cl_clientid, clid))
			return nfserr_stale_clientid;
		return nfs_ok;
	}

	if (STALE_CLIENTID(clid, nn))
		return nfserr_stale_clientid;

	/*
	 * For v4.1+ we get the client in the SEQUENCE op. If we don't have one
	 * cached already then we know this is for v4.0 and "sessions"
	 * will be false.
	 */
	WARN_ON_ONCE(cstate->session);
	spin_lock(&nn->client_lock);
	found = find_confirmed_client(clid, false, nn);
	if (!found) {
		spin_unlock(&nn->client_lock);
		return nfserr_expired;
	}
	atomic_inc(&found->cl_refcount);
	spin_unlock(&nn->client_lock);

	/* Cache the nfs4_client in cstate! */
	cstate->clp = found;
	return nfs_ok;
}

__be32
nfsd4_process_open1(struct nfsd4_compound_state *cstate,
		    struct nfsd4_open *open, struct nfsd_net *nn)
{
	clientid_t *clientid = &open->op_clientid;
	struct nfs4_client *clp = NULL;
	unsigned int strhashval;
	struct nfs4_openowner *oo = NULL;
	__be32 status;

	if (STALE_CLIENTID(&open->op_clientid, nn))
		return nfserr_stale_clientid;
	/*
	 * In case we need it later, after we've already created the
	 * file and don't want to risk a further failure:
	 */
	open->op_file = nfsd4_alloc_file();
	if (open->op_file == NULL)
		return nfserr_jukebox;

	status = lookup_clientid(clientid, cstate, nn);
	if (status)
		return status;
	clp = cstate->clp;

	strhashval = ownerstr_hashval(&open->op_owner);
	oo = find_openstateowner_str(strhashval, open, clp);
	open->op_openowner = oo;
	if (!oo)
		goto new_owner;
	if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
		/* Replace unconfirmed owners without checking for replay. */
		release_openowner(oo);
		open->op_openowner = NULL;
		goto new_owner;
	}
	status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid);
	if (status)
		return status;
	goto alloc_stateid;
new_owner:
	oo = alloc_init_open_stateowner(strhashval, open, cstate);
	if (oo == NULL)
		return nfserr_jukebox;
	open->op_openowner = oo;
alloc_stateid:
	open->op_stp = nfs4_alloc_open_stateid(clp);
	if (!open->op_stp)
		return nfserr_jukebox;

	if (nfsd4_has_session(cstate) &&
	    (cstate->current_fh.fh_export->ex_flags & NFSEXP_PNFS)) {
		open->op_odstate = alloc_clnt_odstate(clp);
		if (!open->op_odstate)
			return nfserr_jukebox;
	}

	return nfs_ok;
}
static inline __be32
nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
{
	if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ))
		return nfserr_openmode;
	else
		return nfs_ok;
}

static int share_access_to_flags(u32 share_access)
{
	return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE;
}

static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, stateid_t *s)
{
	struct nfs4_stid *ret;

	ret = find_stateid_by_type(cl, s,
				NFS4_DELEG_STID|NFS4_REVOKED_DELEG_STID);
	if (!ret)
		return NULL;
	return delegstateid(ret);
}

static bool nfsd4_is_deleg_cur(struct nfsd4_open *open)
{
	return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR ||
	       open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH;
}

static __be32
nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open,
		struct nfs4_delegation **dp)
{
	int flags;
	__be32 status = nfserr_bad_stateid;
	struct nfs4_delegation *deleg;

	deleg = find_deleg_stateid(cl, &open->op_delegate_stateid);
	if (deleg == NULL)
		goto out;
	if (deleg->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID) {
		nfs4_put_stid(&deleg->dl_stid);
		if (cl->cl_minorversion)
			status = nfserr_deleg_revoked;
		goto out;
	}
	flags = share_access_to_flags(open->op_share_access);
	status = nfs4_check_delegmode(deleg, flags);
	if (status) {
		nfs4_put_stid(&deleg->dl_stid);
		goto out;
	}
	*dp = deleg;
out:
	if (!nfsd4_is_deleg_cur(open))
		return nfs_ok;
	if (status)
		return status;
	open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
	return nfs_ok;
}

static inline int nfs4_access_to_access(u32 nfs4_access)
{
	int flags = 0;

	if (nfs4_access & NFS4_SHARE_ACCESS_READ)
		flags |= NFSD_MAY_READ;
	if (nfs4_access & NFS4_SHARE_ACCESS_WRITE)
		flags |= NFSD_MAY_WRITE;
	return flags;
}

static inline __be32
nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
		struct nfsd4_open *open)
{
	struct iattr iattr = {
		.ia_valid = ATTR_SIZE,
		.ia_size = 0,
	};
	if (!open->op_truncate)
		return 0;
	if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
		return nfserr_inval;
	return nfsd_setattr(rqstp, fh, &iattr, 0, (time_t)0);
}
static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
		struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp,
		struct nfsd4_open *open)
{
	struct file *filp = NULL;
	__be32 status;
	int oflag = nfs4_access_to_omode(open->op_share_access);
	int access = nfs4_access_to_access(open->op_share_access);
	unsigned char old_access_bmap, old_deny_bmap;

	spin_lock(&fp->fi_lock);

	/*
	 * Are we trying to set a deny mode that would conflict with
	 * current access?
	 */
	status = nfs4_file_check_deny(fp, open->op_share_deny);
	if (status != nfs_ok) {
		spin_unlock(&fp->fi_lock);
		goto out;
	}

	/* set access to the file */
	status = nfs4_file_get_access(fp, open->op_share_access);
	if (status != nfs_ok) {
		spin_unlock(&fp->fi_lock);
		goto out;
	}

	/* Set access bits in stateid */
	old_access_bmap = stp->st_access_bmap;
	set_access(open->op_share_access, stp);

	/* Set new deny mask */
	old_deny_bmap = stp->st_deny_bmap;
	set_deny(open->op_share_deny, stp);
	fp->fi_share_deny |= (open->op_share_deny & NFS4_SHARE_DENY_BOTH);

	if (!fp->fi_fds[oflag]) {
		spin_unlock(&fp->fi_lock);
		status = nfsd_open(rqstp, cur_fh, S_IFREG, access, &filp);
		if (status)
			goto out_put_access;
		spin_lock(&fp->fi_lock);
		if (!fp->fi_fds[oflag]) {
			fp->fi_fds[oflag] = filp;
			filp = NULL;
		}
	}
	spin_unlock(&fp->fi_lock);
	if (filp)
		fput(filp);

	status = nfsd4_truncate(rqstp, cur_fh, open);
	if (status)
		goto out_put_access;
out:
	return status;
out_put_access:
	stp->st_access_bmap = old_access_bmap;
	nfs4_file_put_access(fp, open->op_share_access);
	reset_union_bmap_deny(bmap_to_share_mode(old_deny_bmap), stp);
	goto out;
}

static __be32
nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, struct nfsd4_open *open)
{
	__be32 status;
	unsigned char old_deny_bmap = stp->st_deny_bmap;

	if (!test_access(open->op_share_access, stp))
		return nfs4_get_vfs_file(rqstp, fp, cur_fh, stp, open);

	/* test and set deny mode */
	spin_lock(&fp->fi_lock);
	status = nfs4_file_check_deny(fp, open->op_share_deny);
	if (status == nfs_ok) {
		set_deny(open->op_share_deny, stp);
		fp->fi_share_deny |=
				(open->op_share_deny & NFS4_SHARE_DENY_BOTH);
	}
	spin_unlock(&fp->fi_lock);

	if (status != nfs_ok)
		return status;

	status = nfsd4_truncate(rqstp, cur_fh, open);
	if (status != nfs_ok)
		reset_union_bmap_deny(old_deny_bmap, stp);
	return status;
}
/* Should we give out recallable state?: */
static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
{
	if (clp->cl_cb_state == NFSD4_CB_UP)
		return true;
	/*
	 * In the sessions case, since we don't have to establish a
	 * separate connection for callbacks, we assume it's OK
	 * until we hear otherwise:
	 */
	return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
}

static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp,
						int flag)
{
	struct file_lock *fl;

	fl = locks_alloc_lock();
	if (!fl)
		return NULL;
	fl->fl_lmops = &nfsd_lease_mng_ops;
	fl->fl_flags = FL_DELEG;
	fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ ? F_RDLCK : F_WRLCK;
	fl->fl_end = OFFSET_MAX;
	fl->fl_owner = (fl_owner_t)dp;
	fl->fl_pid = current->tgid;
	fl->fl_file = dp->dl_stid.sc_file->fi_deleg_file;
	return fl;
}
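/*
 * The delegation is plumbed into the VFS as an FL_DELEG file_lock with
 * fl_owner pointing back at the nfs4_delegation, so a local open that
 * conflicts with the lease reaches nfsd_break_deleg_cb() above, which
 * can recover dp from fl->fl_owner. Only F_RDLCK is requested in
 * practice, since write delegations aren't handed out.
 */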
static struct nfs4_delegation *
nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
		    struct nfs4_file *fp, struct nfs4_clnt_odstate *odstate)
{
	int status = 0;
	struct nfs4_delegation *dp;
	struct file *filp;
	struct file_lock *fl;

	/*
	 * The fi_had_conflict and nfs_get_existing_delegation checks
	 * here are just optimizations; we'll need to recheck them at
	 * the end:
	 */
	if (fp->fi_had_conflict)
		return ERR_PTR(-EAGAIN);

	filp = find_readable_file(fp);
	if (!filp) {
		/* We should always have a readable file here */
		WARN_ON_ONCE(1);
		return ERR_PTR(-EBADF);
	}
	spin_lock(&state_lock);
	spin_lock(&fp->fi_lock);
	if (nfs4_delegation_exists(clp, fp))
		status = -EAGAIN;
	else if (!fp->fi_deleg_file) {
		fp->fi_deleg_file = filp;
		/* increment early to prevent fi_deleg_file from being
		 * cleared */
		fp->fi_delegees = 1;
		filp = NULL;
	} else
		fp->fi_delegees++;
	spin_unlock(&fp->fi_lock);
	spin_unlock(&state_lock);
	if (filp)
		fput(filp);
	if (status)
		return ERR_PTR(status);

	status = -ENOMEM;
	dp = alloc_init_deleg(clp, fp, fh, odstate);
	if (!dp)
		goto out;

	fl = nfs4_alloc_init_lease(dp, NFS4_OPEN_DELEGATE_READ);
	if (!fl)
		goto out_clnt_odstate;

	status = vfs_setlease(fp->fi_deleg_file, fl->fl_type, &fl, NULL);
	if (fl)
		locks_free_lock(fl);
	if (status)
		goto out_clnt_odstate;

	spin_lock(&state_lock);
	spin_lock(&fp->fi_lock);
	if (fp->fi_had_conflict)
		status = -EAGAIN;
	else
		status = hash_delegation_locked(dp, fp);
	spin_unlock(&fp->fi_lock);
	spin_unlock(&state_lock);

	if (status)
		goto out_unlock;

	return dp;
out_unlock:
	vfs_setlease(fp->fi_deleg_file, F_UNLCK, NULL, (void **)&dp);
out_clnt_odstate:
	put_clnt_odstate(dp->dl_clnt_odstate);
	nfs4_put_stid(&dp->dl_stid);
out:
	return ERR_PTR(status);
}

static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
{
	open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
	if (status == -EAGAIN)
		open->op_why_no_deleg = WND4_CONTENTION;
	else {
		open->op_why_no_deleg = WND4_RESOURCE;
		switch (open->op_deleg_want) {
		case NFS4_SHARE_WANT_READ_DELEG:
		case NFS4_SHARE_WANT_WRITE_DELEG:
		case NFS4_SHARE_WANT_ANY_DELEG:
			break;
		case NFS4_SHARE_WANT_CANCEL:
			open->op_why_no_deleg = WND4_CANCELLED;
			break;
		case NFS4_SHARE_WANT_NO_DELEG:
			WARN_ON_ONCE(1);
		}
	}
}
/*
 * Attempt to hand out a delegation.
 *
 * Note we don't support write delegations, and won't until the vfs has
 * proper support for them.
 */
static void
nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open,
			struct nfs4_ol_stateid *stp)
{
	struct nfs4_delegation *dp;
	struct nfs4_openowner *oo = openowner(stp->st_stateowner);
	struct nfs4_client *clp = stp->st_stid.sc_client;
	int cb_up;
	int status = 0;

	cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
	open->op_recall = 0;
	switch (open->op_claim_type) {
		case NFS4_OPEN_CLAIM_PREVIOUS:
			if (!cb_up)
				open->op_recall = 1;
			if (open->op_delegate_type != NFS4_OPEN_DELEGATE_READ)
				goto out_no_deleg;
			break;
		case NFS4_OPEN_CLAIM_NULL:
		case NFS4_OPEN_CLAIM_FH:
			/*
			 * Let's not give out any delegations till everyone's
			 * had the chance to reclaim theirs, *and* until
			 * NLM locks have all been reclaimed:
			 */
			if (locks_in_grace(clp->net))
				goto out_no_deleg;
			if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED))
				goto out_no_deleg;
			/*
			 * Also, if the file was opened for write or
			 * create, there's a good chance the client's
			 * about to write to it, resulting in an
			 * immediate recall (since we don't support
			 * write delegations):
			 */
			if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE)
				goto out_no_deleg;
			if (open->op_create == NFS4_OPEN_CREATE)
				goto out_no_deleg;
			break;
		default:
			goto out_no_deleg;
	}
	dp = nfs4_set_delegation(clp, fh, stp->st_stid.sc_file, stp->st_clnt_odstate);
	if (IS_ERR(dp))
		goto out_no_deleg;

	memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));

	dprintk("NFSD: delegation stateid=" STATEID_FMT "\n",
		STATEID_VAL(&dp->dl_stid.sc_stateid));
	open->op_delegate_type = NFS4_OPEN_DELEGATE_READ;
	nfs4_put_stid(&dp->dl_stid);
	return;
out_no_deleg:
	open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE;
	if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS &&
	    open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) {
		dprintk("NFSD: WARNING: refusing delegation reclaim\n");
		open->op_recall = 1;
	}

	/* 4.1 client asking for a delegation? */
	if (open->op_deleg_want)
		nfsd4_open_deleg_none_ext(open, status);
	return;
}
static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open,
					struct nfs4_delegation *dp)
{
	if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG &&
	    dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
		open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
		open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE;
	} else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG &&
		   dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
		open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
		open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE;
	}
	/* Otherwise the client must be confused wanting a delegation
	 * it already has, therefore we don't return
	 * NFS4_OPEN_DELEGATE_NONE_EXT and reason.
	 */
}
__be32
nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
{
	struct nfsd4_compoundres *resp = rqstp->rq_resp;
	struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
	struct nfs4_file *fp = NULL;
	struct nfs4_ol_stateid *stp = NULL;
	struct nfs4_delegation *dp = NULL;
	__be32 status;
	bool new_stp = false;

	/*
	 * Lookup file; if found, lookup stateid and check open request,
	 * and check for delegations in the process of being recalled.
	 * If not found, create the nfs4_file struct
	 */
	fp = find_or_add_file(open->op_file, &current_fh->fh_handle);
	if (fp != open->op_file) {
		status = nfs4_check_deleg(cl, open, &dp);
		if (status)
			goto out;
		stp = nfsd4_find_and_lock_existing_open(fp, open);
	} else {
		open->op_file = NULL;
		status = nfserr_bad_stateid;
		if (nfsd4_is_deleg_cur(open))
			goto out;
	}

	if (!stp) {
		stp = init_open_stateid(fp, open);
		if (!open->op_stp)
			new_stp = true;
	}

	/*
	 * OPEN the file, or upgrade an existing OPEN.
	 * If truncate fails, the OPEN fails.
	 *
	 * stp is already locked.
	 */
	if (!new_stp) {
		/* Stateid was found, this is an OPEN upgrade */
		status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
		if (status) {
			mutex_unlock(&stp->st_mutex);
			goto out;
		}
	} else {
		status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
		if (status) {
			stp->st_stid.sc_type = NFS4_CLOSED_STID;
			release_open_stateid(stp);
			mutex_unlock(&stp->st_mutex);
			goto out;
		}

		stp->st_clnt_odstate = find_or_hash_clnt_odstate(fp,
							open->op_odstate);
		if (stp->st_clnt_odstate == open->op_odstate)
			open->op_odstate = NULL;
	}

	nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid);
	mutex_unlock(&stp->st_mutex);

	if (nfsd4_has_session(&resp->cstate)) {
		if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
			open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
			open->op_why_no_deleg = WND4_NOT_WANTED;
			goto nodeleg;
		}
	}

	/*
	 * Attempt to hand out a delegation. No error return, because the
	 * OPEN succeeds even if we fail.
	 */
	nfs4_open_delegation(current_fh, open, stp);
nodeleg:
	status = nfs_ok;

	dprintk("%s: stateid=" STATEID_FMT "\n", __func__,
		STATEID_VAL(&stp->st_stid.sc_stateid));
out:
	/* 4.1 client trying to upgrade/downgrade delegation? */
	if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp &&
	    open->op_deleg_want)
		nfsd4_deleg_xgrade_none_ext(open, dp);

	if (fp)
		put_nfs4_file(fp);
	if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
		open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
	/*
	 * To finish the open response, we just need to set the rflags.
	 */
	open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX;
	if (nfsd4_has_session(&resp->cstate))
		open->op_rflags |= NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK;
	else if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED))
		open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM;

	if (dp)
		nfs4_put_stid(&dp->dl_stid);
	if (stp)
		nfs4_put_stid(&stp->st_stid);

	return status;
}
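/*
 * nfsd4_cleanup_open_state() releases whatever was preallocated or looked
 * up while decoding and processing the OPEN but not consumed by it: the
 * openowner reference (after stashing it as the replay owner), the spare
 * nfs4_file, the spare stateid, and the spare open/deny state.
 */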
void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate,
			      struct nfsd4_open *open)
{
	if (open->op_openowner) {
		struct nfs4_stateowner *so = &open->op_openowner->oo_owner;

		nfsd4_cstate_assign_replay(cstate, so);
		nfs4_put_stateowner(so);
	}
	if (open->op_file)
		kmem_cache_free(file_slab, open->op_file);
	if (open->op_stp)
		nfs4_put_stid(&open->op_stp->st_stid);
	if (open->op_odstate)
		kmem_cache_free(odstate_slab, open->op_odstate);
}
__be32
nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	    union nfsd4_op_u *u)
{
	clientid_t *clid = &u->renew;
	struct nfs4_client *clp;
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	dprintk("process_renew(%08x/%08x): starting\n",
			clid->cl_boot, clid->cl_id);
	status = lookup_clientid(clid, cstate, nn);
	if (status)
		goto out;
	clp = cstate->clp;
	status = nfserr_cb_path_down;
	if (!list_empty(&clp->cl_delegations)
			&& clp->cl_cb_state != NFSD4_CB_UP)
		goto out;
	status = nfs_ok;
out:
	return status;
}
void
nfsd4_end_grace(struct nfsd_net *nn)
{
	/* do nothing if grace period already ended */
	if (nn->grace_ended)
		return;

	dprintk("NFSD: end of grace period\n");
	nn->grace_ended = true;
	/*
	 * If the server goes down again right now, an NFSv4
	 * client will still be allowed to reclaim after it comes back up,
	 * even if it hasn't yet had a chance to reclaim state this time.
	 */
	nfsd4_record_grace_done(nn);
	/*
	 * At this point, NFSv4 clients can still reclaim.  But if the
	 * server crashes, any that have not yet reclaimed will be out
	 * of luck on the next boot.
	 *
	 * (NFSv4.1+ clients are considered to have reclaimed once they
	 * call RECLAIM_COMPLETE.  NFSv4.0 clients are considered to
	 * have reclaimed after their first OPEN.)
	 */
	locks_end_grace(&nn->nfsd4_manager);
	/*
	 * At this point, and once lockd and/or any other containers
	 * exit their grace period, further reclaims will fail and
	 * regular locking can resume.
	 */
}
/*
 * If we've waited a lease period but there are still clients trying to
 * reclaim, wait a little longer to give them a chance to finish.
 */
static bool clients_still_reclaiming(struct nfsd_net *nn)
{
	unsigned long now = get_seconds();
	unsigned long double_grace_period_end = nn->boot_time +
						2 * nn->nfsd4_lease;

	if (!nn->somebody_reclaimed)
		return false;
	nn->somebody_reclaimed = false;
	/*
	 * If we've given them *two* lease times to reclaim, and they're
	 * still not done, give up:
	 */
	if (time_after(now, double_grace_period_end))
		return false;
	return true;
}
static time_t
nfs4_laundromat(struct nfsd_net *nn)
{
	struct nfs4_client *clp;
	struct nfs4_openowner *oo;
	struct nfs4_delegation *dp;
	struct nfs4_ol_stateid *stp;
	struct nfsd4_blocked_lock *nbl;
	struct list_head *pos, *next, reaplist;
	time_t cutoff = get_seconds() - nn->nfsd4_lease;
	time_t t, new_timeo = nn->nfsd4_lease;

	dprintk("NFSD: laundromat service - starting\n");

	if (clients_still_reclaiming(nn)) {
		new_timeo = 0;
		goto out;
	}
	nfsd4_end_grace(nn);
	INIT_LIST_HEAD(&reaplist);
	spin_lock(&nn->client_lock);
	list_for_each_safe(pos, next, &nn->client_lru) {
		clp = list_entry(pos, struct nfs4_client, cl_lru);
		if (time_after((unsigned long)clp->cl_time, (unsigned long)cutoff)) {
			t = clp->cl_time - cutoff;
			new_timeo = min(new_timeo, t);
			break;
		}
		if (mark_client_expired_locked(clp)) {
			dprintk("NFSD: client in use (clientid %08x)\n",
				clp->cl_clientid.cl_id);
			continue;
		}
		list_add(&clp->cl_lru, &reaplist);
	}
	spin_unlock(&nn->client_lock);
	list_for_each_safe(pos, next, &reaplist) {
		clp = list_entry(pos, struct nfs4_client, cl_lru);
		dprintk("NFSD: purging unused client (clientid %08x)\n",
			clp->cl_clientid.cl_id);
		list_del_init(&clp->cl_lru);
		expire_client(clp);
	}
	spin_lock(&state_lock);
	list_for_each_safe(pos, next, &nn->del_recall_lru) {
		dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
		if (time_after((unsigned long)dp->dl_time, (unsigned long)cutoff)) {
			t = dp->dl_time - cutoff;
			new_timeo = min(new_timeo, t);
			break;
		}
		WARN_ON(!unhash_delegation_locked(dp));
		list_add(&dp->dl_recall_lru, &reaplist);
	}
	spin_unlock(&state_lock);
	while (!list_empty(&reaplist)) {
		dp = list_first_entry(&reaplist, struct nfs4_delegation,
					dl_recall_lru);
		list_del_init(&dp->dl_recall_lru);
		revoke_delegation(dp);
	}

	spin_lock(&nn->client_lock);
	while (!list_empty(&nn->close_lru)) {
		oo = list_first_entry(&nn->close_lru, struct nfs4_openowner,
					oo_close_lru);
		if (time_after((unsigned long)oo->oo_time,
			       (unsigned long)cutoff)) {
			t = oo->oo_time - cutoff;
			new_timeo = min(new_timeo, t);
			break;
		}
		list_del_init(&oo->oo_close_lru);
		stp = oo->oo_last_closed_stid;
		oo->oo_last_closed_stid = NULL;
		spin_unlock(&nn->client_lock);
		nfs4_put_stid(&stp->st_stid);
		spin_lock(&nn->client_lock);
	}
	spin_unlock(&nn->client_lock);

	/*
	 * It's possible for a client to try and acquire an already held lock
	 * that is being held for a long time, and then lose interest in it.
	 * So, we clean out any un-revisited request after a lease period
	 * under the assumption that the client is no longer interested.
	 *
	 * RFC5661, sec. 9.6 states that the client must not rely on getting
	 * notifications and must continue to poll for locks, even when the
	 * server supports them. Thus this shouldn't lead to clients blocking
	 * indefinitely once the lock does become free.
	 */
	BUG_ON(!list_empty(&reaplist));
	spin_lock(&nn->blocked_locks_lock);
	while (!list_empty(&nn->blocked_locks_lru)) {
		nbl = list_first_entry(&nn->blocked_locks_lru,
					struct nfsd4_blocked_lock, nbl_lru);
		if (time_after((unsigned long)nbl->nbl_time,
			       (unsigned long)cutoff)) {
			t = nbl->nbl_time - cutoff;
			new_timeo = min(new_timeo, t);
			break;
		}
		list_move(&nbl->nbl_lru, &reaplist);
		list_del_init(&nbl->nbl_list);
	}
	spin_unlock(&nn->blocked_locks_lock);

	while (!list_empty(&reaplist)) {
		nbl = list_first_entry(&reaplist,
					struct nfsd4_blocked_lock, nbl_lru);
		list_del_init(&nbl->nbl_lru);
		posix_unblock_lock(&nbl->nbl_lock);
		free_blocked_lock(nbl);
	}
out:
	new_timeo = max_t(time_t, new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT);
	return new_timeo;
}
static struct workqueue_struct *laundry_wq;
static void laundromat_main(struct work_struct *);

static void
laundromat_main(struct work_struct *laundry)
{
	time_t t;
	struct delayed_work *dwork = to_delayed_work(laundry);
	struct nfsd_net *nn = container_of(dwork, struct nfsd_net,
					   laundromat_work);

	t = nfs4_laundromat(nn);
	dprintk("NFSD: laundromat_main - sleeping for %ld seconds\n", t);
	queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
}
static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp)
{
	if (!fh_match(&fhp->fh_handle, &stp->sc_file->fi_fhandle))
		return nfserr_bad_stateid;
	return nfs_ok;
}
static __be32
access_permit_read(struct nfs4_ol_stateid *stp)
{
	return test_access(NFS4_SHARE_ACCESS_READ, stp) ||
		test_access(NFS4_SHARE_ACCESS_BOTH, stp) ||
		test_access(NFS4_SHARE_ACCESS_WRITE, stp);
}

static __be32
access_permit_write(struct nfs4_ol_stateid *stp)
{
	return test_access(NFS4_SHARE_ACCESS_WRITE, stp) ||
		test_access(NFS4_SHARE_ACCESS_BOTH, stp);
}
static __be32
nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
{
	__be32 status = nfserr_openmode;

	/* For lock stateid's, we test the parent open, not the lock: */
	if (stp->st_openstp)
		stp = stp->st_openstp;
	if ((flags & WR_STATE) && !access_permit_write(stp))
		goto out;
	if ((flags & RD_STATE) && !access_permit_read(stp))
		goto out;
	status = nfs_ok;
out:
	return status;
}
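/*
 * Special stateids (RFC 7530): the all-zeros stateid lets a client do
 * I/O with no open state at all, and the all-ones stateid bypasses
 * share-reservation checking for READ.  check_special_stateids() decides
 * whether such anonymous I/O may proceed; outside the grace period that
 * reduces to a share-conflict check against existing OPENs.
 */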
static inline __be32
check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid, int flags)
{
	if (ONE_STATEID(stateid) && (flags & RD_STATE))
		return nfs_ok;
	else if (opens_in_grace(net)) {
		/* Answer in remaining cases depends on existence of
		 * conflicting state; so we must wait out the grace period. */
		return nfserr_grace;
	} else if (flags & WR_STATE)
		return nfs4_share_conflict(current_fh,
				NFS4_SHARE_DENY_WRITE);
	else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */
		return nfs4_share_conflict(current_fh,
				NFS4_SHARE_DENY_READ);
}
/*
 * Allow READ/WRITE during grace period on recovered state only for files
 * that are not able to provide mandatory locking.
 */
static inline bool
grace_disallows_io(struct net *net, struct inode *inode)
{
	return opens_in_grace(net) && mandatory_lock(inode);
}
static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
{
	/*
	 * When sessions are used the stateid generation number is ignored
	 * when it is zero.
	 */
	if (has_session && in->si_generation == 0)
		return nfs_ok;

	if (in->si_generation == ref->si_generation)
		return nfs_ok;

	/* If the client sends us a stateid from the future, it's buggy: */
	if (nfsd4_stateid_generation_after(in, ref))
		return nfserr_bad_stateid;
	/*
	 * However, we could see a stateid from the past, even from a
	 * non-buggy client.  For example, if the client sends a lock
	 * while some IO is outstanding, the lock may bump si_generation
	 * while the IO is still in flight.  The client could avoid that
	 * situation by waiting for responses on all the IO requests,
	 * but better performance may result in retrying IO that
	 * receives an old_stateid error if requests are rarely
	 * reordered in flight:
	 */
	return nfserr_old_stateid;
}
static __be32 nfsd4_stid_check_stateid_generation(stateid_t *in, struct nfs4_stid *s, bool has_session)
{
	__be32 ret;

	spin_lock(&s->sc_lock);
	ret = nfsd4_verify_open_stid(s);
	if (ret == nfs_ok)
		ret = check_stateid_generation(in, &s->sc_stateid, has_session);
	spin_unlock(&s->sc_lock);
	return ret;
}
static __be32 nfsd4_check_openowner_confirmed(struct nfs4_ol_stateid *ols)
{
	if (ols->st_stateowner->so_is_open_owner &&
	    !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
		return nfserr_bad_stateid;
	return nfs_ok;
}
static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
{
	struct nfs4_stid *s;
	__be32 status = nfserr_bad_stateid;

	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
		CLOSE_STATEID(stateid))
		return status;
	/* Client debugging aid. */
	if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) {
		char addr_str[INET6_ADDRSTRLEN];
		rpc_ntop((struct sockaddr *)&cl->cl_addr, addr_str,
				 sizeof(addr_str));
		pr_warn_ratelimited("NFSD: client %s testing state ID "
					"with incorrect client ID\n", addr_str);
		return status;
	}
	spin_lock(&cl->cl_lock);
	s = find_stateid_locked(cl, stateid);
	if (!s)
		goto out_unlock;
	status = nfsd4_stid_check_stateid_generation(stateid, s, 1);
	if (status)
		goto out_unlock;
	switch (s->sc_type) {
	case NFS4_DELEG_STID:
		status = nfs_ok;
		break;
	case NFS4_REVOKED_DELEG_STID:
		status = nfserr_deleg_revoked;
		break;
	case NFS4_OPEN_STID:
	case NFS4_LOCK_STID:
		status = nfsd4_check_openowner_confirmed(openlockstateid(s));
		break;
	default:
		printk("unknown stateid type %x\n", s->sc_type);
		/* Fallthrough */
	case NFS4_CLOSED_STID:
	case NFS4_CLOSED_DELEG_STID:
		status = nfserr_bad_stateid;
	}
out_unlock:
	spin_unlock(&cl->cl_lock);
	return status;
}
__be32
nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
		     stateid_t *stateid, unsigned char typemask,
		     struct nfs4_stid **s, struct nfsd_net *nn)
{
	__be32 status;
	bool return_revoked = false;

	/*
	 *  only return revoked delegations if explicitly asked.
	 *  otherwise we report revoked or bad_stateid status.
	 */
	if (typemask & NFS4_REVOKED_DELEG_STID)
		return_revoked = true;
	else if (typemask & NFS4_DELEG_STID)
		typemask |= NFS4_REVOKED_DELEG_STID;

	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
		CLOSE_STATEID(stateid))
		return nfserr_bad_stateid;
	status = lookup_clientid(&stateid->si_opaque.so_clid, cstate, nn);
	if (status == nfserr_stale_clientid) {
		if (cstate->session)
			return nfserr_bad_stateid;
		return nfserr_stale_stateid;
	}
	if (status)
		return status;
	*s = find_stateid_by_type(cstate->clp, stateid, typemask);
	if (!*s)
		return nfserr_bad_stateid;
	if (((*s)->sc_type == NFS4_REVOKED_DELEG_STID) && !return_revoked) {
		nfs4_put_stid(*s);
		if (cstate->minorversion)
			return nfserr_deleg_revoked;
		return nfserr_bad_stateid;
	}
	return nfs_ok;
}
static struct file *
nfs4_find_file(struct nfs4_stid *s, int flags)
{
	if (!s)
		return NULL;

	switch (s->sc_type) {
	case NFS4_DELEG_STID:
		if (WARN_ON_ONCE(!s->sc_file->fi_deleg_file))
			return NULL;
		return get_file(s->sc_file->fi_deleg_file);
	case NFS4_OPEN_STID:
	case NFS4_LOCK_STID:
		if (flags & RD_STATE)
			return find_readable_file(s->sc_file);
		else
			return find_writeable_file(s->sc_file);
		break;
	}

	return NULL;
}
static __be32
nfs4_check_olstateid(struct svc_fh *fhp, struct nfs4_ol_stateid *ols, int flags)
{
	__be32 status;

	status = nfsd4_check_openowner_confirmed(ols);
	if (status)
		return status;
	return nfs4_check_openmode(ols, flags);
}
static __be32
nfs4_check_file(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfs4_stid *s,
		struct file **filpp, bool *tmp_file, int flags)
{
	int acc = (flags & RD_STATE) ? NFSD_MAY_READ : NFSD_MAY_WRITE;
	struct file *file;
	__be32 status;

	file = nfs4_find_file(s, flags);
	if (file) {
		status = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry,
				acc | NFSD_MAY_OWNER_OVERRIDE);
		if (status) {
			fput(file);
			return status;
		}

		*filpp = file;
	} else {
		status = nfsd_open(rqstp, fhp, S_IFREG, acc, filpp);
		if (status)
			return status;

		if (tmp_file)
			*tmp_file = true;
	}

	return 0;
}
/*
 * Checks for stateid operations
 */
__be32
nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate, struct svc_fh *fhp,
		stateid_t *stateid, int flags, struct file **filpp, bool *tmp_file)
{
	struct inode *ino = d_inode(fhp->fh_dentry);
	struct net *net = SVC_NET(rqstp);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	struct nfs4_stid *s = NULL;
	__be32 status;

	if (filpp)
		*filpp = NULL;
	if (tmp_file)
		*tmp_file = false;

	if (grace_disallows_io(net, ino))
		return nfserr_grace;

	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) {
		status = check_special_stateids(net, fhp, stateid, flags);
		goto done;
	}

	status = nfsd4_lookup_stateid(cstate, stateid,
				NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID,
				&s, nn);
	if (status)
		return status;
	status = nfsd4_stid_check_stateid_generation(stateid, s,
			nfsd4_has_session(cstate));
	if (status)
		goto out;

	switch (s->sc_type) {
	case NFS4_DELEG_STID:
		status = nfs4_check_delegmode(delegstateid(s), flags);
		break;
	case NFS4_OPEN_STID:
	case NFS4_LOCK_STID:
		status = nfs4_check_olstateid(fhp, openlockstateid(s), flags);
		break;
	default:
		status = nfserr_bad_stateid;
		break;
	}
	if (status)
		goto out;
	status = nfs4_check_fh(fhp, s);

done:
	if (!status && filpp)
		status = nfs4_check_file(rqstp, fhp, s, filpp, tmp_file, flags);
out:
	if (s)
		nfs4_put_stid(s);
	return status;
}
/*
 * Test if the stateid is valid
 */
__be32
nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		   union nfsd4_op_u *u)
{
	struct nfsd4_test_stateid *test_stateid = &u->test_stateid;
	struct nfsd4_test_stateid_id *stateid;
	struct nfs4_client *cl = cstate->session->se_client;

	list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list)
		stateid->ts_id_status =
			nfsd4_validate_stateid(cl, &stateid->ts_id_stateid);

	return nfs_ok;
}
static __be32
nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s)
{
	struct nfs4_ol_stateid *stp = openlockstateid(s);
	__be32 ret;

	ret = nfsd4_lock_ol_stateid(stp);
	if (ret)
		goto out_put_stid;

	ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
	if (ret)
		goto out;

	ret = nfserr_locks_held;
	if (check_for_locks(stp->st_stid.sc_file,
			    lockowner(stp->st_stateowner)))
		goto out;

	release_lock_stateid(stp);
	ret = nfs_ok;

out:
	mutex_unlock(&stp->st_mutex);
out_put_stid:
	nfs4_put_stid(s);
	return ret;
}
__be32
nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		   union nfsd4_op_u *u)
{
	struct nfsd4_free_stateid *free_stateid = &u->free_stateid;
	stateid_t *stateid = &free_stateid->fr_stateid;
	struct nfs4_stid *s;
	struct nfs4_delegation *dp;
	struct nfs4_client *cl = cstate->session->se_client;
	__be32 ret = nfserr_bad_stateid;

	spin_lock(&cl->cl_lock);
	s = find_stateid_locked(cl, stateid);
	if (!s)
		goto out_unlock;
	spin_lock(&s->sc_lock);
	switch (s->sc_type) {
	case NFS4_DELEG_STID:
		ret = nfserr_locks_held;
		break;
	case NFS4_OPEN_STID:
		ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
		if (ret)
			break;
		ret = nfserr_locks_held;
		break;
	case NFS4_LOCK_STID:
		spin_unlock(&s->sc_lock);
		refcount_inc(&s->sc_count);
		spin_unlock(&cl->cl_lock);
		ret = nfsd4_free_lock_stateid(stateid, s);
		goto out;
	case NFS4_REVOKED_DELEG_STID:
		spin_unlock(&s->sc_lock);
		dp = delegstateid(s);
		list_del_init(&dp->dl_recall_lru);
		spin_unlock(&cl->cl_lock);
		nfs4_put_stid(s);
		ret = nfs_ok;
		goto out;
	/* Default falls through and returns nfserr_bad_stateid */
	}
	spin_unlock(&s->sc_lock);
out_unlock:
	spin_unlock(&cl->cl_lock);
out:
	return ret;
}
static inline int
setlkflg(int type)
{
	return (type == NFS4_READW_LT || type == NFS4_READ_LT) ?
		RD_STATE : WR_STATE;
}
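/*
 * nfs4_seqid_op_checks() performs the checks shared by all seqid-mutating
 * operations: the owner's seqid, the stateid generation, and the current
 * filehandle.  On success the stateid's st_mutex is left held for the
 * caller; on failure it is dropped again before returning.
 */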
static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp)
{
	struct svc_fh *current_fh = &cstate->current_fh;
	struct nfs4_stateowner *sop = stp->st_stateowner;
	__be32 status;

	status = nfsd4_check_seqid(cstate, sop, seqid);
	if (status)
		return status;
	status = nfsd4_lock_ol_stateid(stp);
	if (status != nfs_ok)
		return status;
	status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
	if (status == nfs_ok)
		status = nfs4_check_fh(current_fh, &stp->st_stid);
	if (status != nfs_ok)
		mutex_unlock(&stp->st_mutex);
	return status;
}
/*
 * Checks for sequence id mutating operations.
 */
static __be32
nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
			 stateid_t *stateid, char typemask,
			 struct nfs4_ol_stateid **stpp,
			 struct nfsd_net *nn)
{
	__be32 status;
	struct nfs4_stid *s;
	struct nfs4_ol_stateid *stp = NULL;

	dprintk("NFSD: %s: seqid=%d stateid = " STATEID_FMT "\n", __func__,
		seqid, STATEID_VAL(stateid));

	*stpp = NULL;
	status = nfsd4_lookup_stateid(cstate, stateid, typemask, &s, nn);
	if (status)
		return status;
	stp = openlockstateid(s);
	nfsd4_cstate_assign_replay(cstate, stp->st_stateowner);

	status = nfs4_seqid_op_checks(cstate, stateid, seqid, stp);
	if (!status)
		*stpp = stp;
	else
		nfs4_put_stid(&stp->st_stid);
	return status;
}
static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
			stateid_t *stateid, struct nfs4_ol_stateid **stpp, struct nfsd_net *nn)
{
	__be32 status;
	struct nfs4_openowner *oo;
	struct nfs4_ol_stateid *stp;

	status = nfs4_preprocess_seqid_op(cstate, seqid, stateid,
						NFS4_OPEN_STID, &stp, nn);
	if (status)
		return status;
	oo = openowner(stp->st_stateowner);
	if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
		mutex_unlock(&stp->st_mutex);
		nfs4_put_stid(&stp->st_stid);
		return nfserr_bad_stateid;
	}
	*stpp = stp;
	return nfs_ok;
}
__be32
nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		   union nfsd4_op_u *u)
{
	struct nfsd4_open_confirm *oc = &u->open_confirm;
	__be32 status;
	struct nfs4_openowner *oo;
	struct nfs4_ol_stateid *stp;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	dprintk("NFSD: nfsd4_open_confirm on file %pd\n",
			cstate->current_fh.fh_dentry);

	status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
	if (status)
		return status;

	status = nfs4_preprocess_seqid_op(cstate,
					oc->oc_seqid, &oc->oc_req_stateid,
					NFS4_OPEN_STID, &stp, nn);
	if (status)
		goto out;
	oo = openowner(stp->st_stateowner);
	status = nfserr_bad_stateid;
	if (oo->oo_flags & NFS4_OO_CONFIRMED) {
		mutex_unlock(&stp->st_mutex);
		goto put_stateid;
	}
	oo->oo_flags |= NFS4_OO_CONFIRMED;
	nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid);
	mutex_unlock(&stp->st_mutex);
	dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n",
		__func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid));

	nfsd4_client_record_create(oo->oo_owner.so_client);
	status = nfs_ok;
put_stateid:
	nfs4_put_stid(&stp->st_stid);
out:
	nfsd4_bump_seqid(cstate, status);
	return status;
}
static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access)
{
	if (!test_access(access, stp))
		return;
	nfs4_file_put_access(stp->st_stid.sc_file, access);
	clear_access(access, stp);
}

static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access)
{
	switch (to_access) {
	case NFS4_SHARE_ACCESS_READ:
		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_WRITE);
		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
		break;
	case NFS4_SHARE_ACCESS_WRITE:
		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_READ);
		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
		break;
	case NFS4_SHARE_ACCESS_BOTH:
		break;
	default:
		WARN_ON_ONCE(1);
	}
}
__be32
nfsd4_open_downgrade(struct svc_rqst *rqstp,
		     struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
{
	struct nfsd4_open_downgrade *od = &u->open_downgrade;
	__be32 status;
	struct nfs4_ol_stateid *stp;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	dprintk("NFSD: nfsd4_open_downgrade on file %pd\n",
			cstate->current_fh.fh_dentry);

	/* We don't yet support WANT bits: */
	if (od->od_deleg_want)
		dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__,
			od->od_deleg_want);

	status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid,
					&od->od_stateid, &stp, nn);
	if (status)
		goto out;
	status = nfserr_inval;
	if (!test_access(od->od_share_access, stp)) {
		dprintk("NFSD: access not a subset of current bitmap: 0x%hhx, input access=%08x\n",
			stp->st_access_bmap, od->od_share_access);
		goto put_stateid;
	}
	if (!test_deny(od->od_share_deny, stp)) {
		dprintk("NFSD: deny not a subset of current bitmap: 0x%hhx, input deny=%08x\n",
			stp->st_deny_bmap, od->od_share_deny);
		goto put_stateid;
	}
	nfs4_stateid_downgrade(stp, od->od_share_access);
	reset_union_bmap_deny(od->od_share_deny, stp);
	nfs4_inc_and_copy_stateid(&od->od_stateid, &stp->st_stid);
	status = nfs_ok;
put_stateid:
	mutex_unlock(&stp->st_mutex);
	nfs4_put_stid(&stp->st_stid);
out:
	nfsd4_bump_seqid(cstate, status);
	return status;
}
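/*
 * A closed stateid must be kept around for an NFSv4.0 client's seqid
 * replay (via move_to_close_lru), but can be freed immediately for
 * sessions-based 4.1+ clients, which never replay: that is the
 * minorversion split below.
 */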
static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
{
	struct nfs4_client *clp = s->st_stid.sc_client;
	bool unhashed;
	LIST_HEAD(reaplist);

	spin_lock(&clp->cl_lock);
	unhashed = unhash_open_stateid(s, &reaplist);

	if (clp->cl_minorversion) {
		if (unhashed)
			put_ol_stateid_locked(s, &reaplist);
		spin_unlock(&clp->cl_lock);
		free_ol_stateid_reaplist(&reaplist);
	} else {
		spin_unlock(&clp->cl_lock);
		free_ol_stateid_reaplist(&reaplist);
		if (unhashed)
			move_to_close_lru(s, clp->net);
	}
}
/*
 * nfs4_unlock_state() called after encode
 */
__be32
nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	struct nfsd4_close *close = &u->close;
	__be32 status;
	struct nfs4_ol_stateid *stp;
	struct net *net = SVC_NET(rqstp);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	dprintk("NFSD: nfsd4_close on file %pd\n",
			cstate->current_fh.fh_dentry);

	status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid,
					&close->cl_stateid,
					NFS4_OPEN_STID|NFS4_CLOSED_STID,
					&stp, nn);
	nfsd4_bump_seqid(cstate, status);
	if (status)
		goto out;

	stp->st_stid.sc_type = NFS4_CLOSED_STID;

	/*
	 * Technically we don't _really_ have to increment or copy it, since
	 * it should just be gone after this operation and we clobber the
	 * copied value below, but we continue to do so here just to ensure
	 * that racing ops see that there was a state change.
	 */
	nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);

	nfsd4_close_open_stateid(stp);
	mutex_unlock(&stp->st_mutex);

	/* v4.1+ suggests that we send a special stateid in here, since the
	 * clients should just ignore this anyway. Since this is not useful
	 * for v4.0 clients either, we set it to the special close_stateid
	 * universally.
	 *
	 * See RFC5661 section 18.2.4, and RFC7530 section 16.2.5
	 */
	memcpy(&close->cl_stateid, &close_stateid, sizeof(close->cl_stateid));

	/* put reference from nfs4_preprocess_seqid_op */
	nfs4_put_stid(&stp->st_stid);
out:
	return status;
}
__be32
nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		  union nfsd4_op_u *u)
{
	struct nfsd4_delegreturn *dr = &u->delegreturn;
	struct nfs4_delegation *dp;
	stateid_t *stateid = &dr->dr_stateid;
	struct nfs4_stid *s;
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
		return status;

	status = nfsd4_lookup_stateid(cstate, stateid, NFS4_DELEG_STID, &s, nn);
	if (status)
		goto out;
	dp = delegstateid(s);
	status = nfsd4_stid_check_stateid_generation(stateid, &dp->dl_stid, nfsd4_has_session(cstate));
	if (status)
		goto put_stateid;

	destroy_delegation(dp);
put_stateid:
	nfs4_put_stid(&dp->dl_stid);
out:
	return status;
}
static inline u64
end_offset(u64 start, u64 len)
{
	u64 end;

	end = start + len;
	return end >= start ? end : NFS4_MAX_UINT64;
}

/* last octet in a range */
static inline u64
last_byte_offset(u64 start, u64 len)
{
	u64 end;

	WARN_ON_ONCE(!len);
	end = start + len;
	return end > start ? end - 1 : NFS4_MAX_UINT64;
}
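/*
 * Example: a lock with offset 5 and length 3 covers bytes 5..7, so
 * last_byte_offset(5, 3) = 7.  A length that runs off the end of the
 * 64-bit space (including the special NFS4_MAX_UINT64 "to EOF" length)
 * makes start + len wrap around, and the range is clamped to
 * NFS4_MAX_UINT64, i.e. "up to the last possible byte".
 */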
/*
 * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that
 * we can't properly handle lock requests that go beyond the (2^63 - 1)-th
 * byte, because of sign extension problems.  Since NFSv4 calls for 64-bit
 * locking, this prevents us from being completely protocol-compliant.  The
 * real solution to this problem is to start using unsigned file offsets in
 * the VFS, but this is a very deep change!
 */
static inline void
nfs4_transform_lock_offset(struct file_lock *lock)
{
	if (lock->fl_start < 0)
		lock->fl_start = OFFSET_MAX;
	if (lock->fl_end < 0)
		lock->fl_end = OFFSET_MAX;
}
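/*
 * Example: an NFSv4 range whose last byte is at or beyond 2^63 yields an
 * fl_end that is negative when interpreted as a signed loff_t; the
 * transform above clamps it to OFFSET_MAX (2^63 - 1), the largest offset
 * the VFS can represent.
 */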
static fl_owner_t
nfsd4_fl_get_owner(fl_owner_t owner)
{
	struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;

	nfs4_get_stateowner(&lo->lo_owner);
	return owner;
}

static void
nfsd4_fl_put_owner(fl_owner_t owner)
{
	struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;

	if (lo)
		nfs4_put_stateowner(&lo->lo_owner);
}
static void
nfsd4_lm_notify(struct file_lock *fl)
{
	struct nfs4_lockowner		*lo = (struct nfs4_lockowner *)fl->fl_owner;
	struct net			*net = lo->lo_owner.so_client->net;
	struct nfsd_net			*nn = net_generic(net, nfsd_net_id);
	struct nfsd4_blocked_lock	*nbl = container_of(fl,
						struct nfsd4_blocked_lock, nbl_lock);
	bool queue = false;

	/* An empty list means that something else is going to be using it */
	spin_lock(&nn->blocked_locks_lock);
	if (!list_empty(&nbl->nbl_list)) {
		list_del_init(&nbl->nbl_list);
		list_del_init(&nbl->nbl_lru);
		queue = true;
	}
	spin_unlock(&nn->blocked_locks_lock);

	if (queue)
		nfsd4_run_cb(&nbl->nbl_cb);
}

static const struct lock_manager_operations nfsd_posix_mng_ops  = {
	.lm_notify = nfsd4_lm_notify,
	.lm_get_owner = nfsd4_fl_get_owner,
	.lm_put_owner = nfsd4_fl_put_owner,
};
static void
nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny)
{
	struct nfs4_lockowner *lo;

	if (fl->fl_lmops == &nfsd_posix_mng_ops) {
		lo = (struct nfs4_lockowner *) fl->fl_owner;
		deny->ld_owner.data = kmemdup(lo->lo_owner.so_owner.data,
					lo->lo_owner.so_owner.len, GFP_KERNEL);
		if (!deny->ld_owner.data)
			/* We just don't care that much */
			goto nevermind;
		deny->ld_owner.len = lo->lo_owner.so_owner.len;
		deny->ld_clientid = lo->lo_owner.so_client->cl_clientid;
	} else {
nevermind:
		deny->ld_owner.len = 0;
		deny->ld_owner.data = NULL;
		deny->ld_clientid.cl_boot = 0;
		deny->ld_clientid.cl_id = 0;
	}
	deny->ld_start = fl->fl_start;
	deny->ld_length = NFS4_MAX_UINT64;
	if (fl->fl_end != NFS4_MAX_UINT64)
		deny->ld_length = fl->fl_end - fl->fl_start + 1;
	deny->ld_type = NFS4_READ_LT;
	if (fl->fl_type != F_RDLCK)
		deny->ld_type = NFS4_WRITE_LT;
}
static struct nfs4_lockowner *
find_lockowner_str_locked(struct nfs4_client *clp, struct xdr_netobj *owner)
{
	unsigned int strhashval = ownerstr_hashval(owner);
	struct nfs4_stateowner *so;

	lockdep_assert_held(&clp->cl_lock);

	list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[strhashval],
			    so_strhash) {
		if (so->so_is_open_owner)
			continue;
		if (same_owner_str(so, owner))
			return lockowner(nfs4_get_stateowner(so));
	}
	return NULL;
}

static struct nfs4_lockowner *
find_lockowner_str(struct nfs4_client *clp, struct xdr_netobj *owner)
{
	struct nfs4_lockowner *lo;

	spin_lock(&clp->cl_lock);
	lo = find_lockowner_str_locked(clp, owner);
	spin_unlock(&clp->cl_lock);
	return lo;
}

static void nfs4_unhash_lockowner(struct nfs4_stateowner *sop)
{
	unhash_lockowner_locked(lockowner(sop));
}

static void nfs4_free_lockowner(struct nfs4_stateowner *sop)
{
	struct nfs4_lockowner *lo = lockowner(sop);

	kmem_cache_free(lockowner_slab, lo);
}
static const struct nfs4_stateowner_operations lockowner_ops = {
	.so_unhash =	nfs4_unhash_lockowner,
	.so_free =	nfs4_free_lockowner,
};

/*
 * Alloc a lock owner structure.
 * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) has
 * occurred.
 *
 * strhashval = ownerstr_hashval
 */
static struct nfs4_lockowner *
alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp,
			   struct nfs4_ol_stateid *open_stp,
			   struct nfsd4_lock *lock)
{
	struct nfs4_lockowner *lo, *ret;

	lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp);
	if (!lo)
		return NULL;
	INIT_LIST_HEAD(&lo->lo_blocked);
	INIT_LIST_HEAD(&lo->lo_owner.so_stateids);
	lo->lo_owner.so_is_open_owner = 0;
	lo->lo_owner.so_seqid = lock->lk_new_lock_seqid;
	lo->lo_owner.so_ops = &lockowner_ops;
	spin_lock(&clp->cl_lock);
	ret = find_lockowner_str_locked(clp, &lock->lk_new_owner);
	if (ret == NULL) {
		list_add(&lo->lo_owner.so_strhash,
			 &clp->cl_ownerstr_hashtbl[strhashval]);
		ret = lo;
	} else
		nfs4_free_stateowner(&lo->lo_owner);

	spin_unlock(&clp->cl_lock);
	return ret;
}
static struct nfs4_ol_stateid *
find_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp)
{
	struct nfs4_ol_stateid *lst;
	struct nfs4_client *clp = lo->lo_owner.so_client;

	lockdep_assert_held(&clp->cl_lock);

	list_for_each_entry(lst, &lo->lo_owner.so_stateids, st_perstateowner) {
		if (lst->st_stid.sc_type != NFS4_LOCK_STID)
			continue;
		if (lst->st_stid.sc_file == fp) {
			refcount_inc(&lst->st_stid.sc_count);
			return lst;
		}
	}
	return NULL;
}
static struct nfs4_ol_stateid *
init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
		  struct nfs4_file *fp, struct inode *inode,
		  struct nfs4_ol_stateid *open_stp)
{
	struct nfs4_client *clp = lo->lo_owner.so_client;
	struct nfs4_ol_stateid *retstp;

	mutex_init(&stp->st_mutex);
	mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);
retry:
	spin_lock(&clp->cl_lock);
	spin_lock(&fp->fi_lock);
	retstp = find_lock_stateid(lo, fp);
	if (retstp)
		goto out_unlock;

	refcount_inc(&stp->st_stid.sc_count);
	stp->st_stid.sc_type = NFS4_LOCK_STID;
	stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner);
	get_nfs4_file(fp);
	stp->st_stid.sc_file = fp;
	stp->st_access_bmap = 0;
	stp->st_deny_bmap = open_stp->st_deny_bmap;
	stp->st_openstp = open_stp;
	list_add(&stp->st_locks, &open_stp->st_locks);
	list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
	list_add(&stp->st_perfile, &fp->fi_stateids);
out_unlock:
	spin_unlock(&fp->fi_lock);
	spin_unlock(&clp->cl_lock);
	if (retstp) {
		if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
			nfs4_put_stid(&retstp->st_stid);
			goto retry;
		}
		/* To keep mutex tracking happy */
		mutex_unlock(&stp->st_mutex);
		stp = retstp;
	}
	return stp;
}
static struct nfs4_ol_stateid *
find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
			    struct inode *inode, struct nfs4_ol_stateid *ost,
			    bool *new)
{
	struct nfs4_stid *ns = NULL;
	struct nfs4_ol_stateid *lst;
	struct nfs4_openowner *oo = openowner(ost->st_stateowner);
	struct nfs4_client *clp = oo->oo_owner.so_client;

	*new = false;
	spin_lock(&clp->cl_lock);
	lst = find_lock_stateid(lo, fi);
	spin_unlock(&clp->cl_lock);
	if (lst != NULL) {
		if (nfsd4_lock_ol_stateid(lst) == nfs_ok)
			goto out;
		nfs4_put_stid(&lst->st_stid);
	}
	ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid);
	if (ns == NULL)
		return NULL;

	lst = init_lock_stateid(openlockstateid(ns), lo, fi, inode, ost);
	if (lst == openlockstateid(ns))
		*new = true;
	else
		nfs4_put_stid(ns);
out:
	return lst;
}
static int
check_lock_length(u64 offset, u64 length)
{
	return ((length == 0) || ((length != NFS4_MAX_UINT64) &&
		(length > ~offset)));
}
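/*
 * Example: length = 0 is always rejected.  For offset = ~0ULL - 2 we have
 * ~offset = 2, so lengths 1 and 2 are accepted while any larger length is
 * rejected -- unless it is the special NFS4_MAX_UINT64 "lock to EOF"
 * value, which is always allowed.
 */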
static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access)
{
	struct nfs4_file *fp = lock_stp->st_stid.sc_file;

	lockdep_assert_held(&fp->fi_lock);

	if (test_access(access, lock_stp))
		return;
	__nfs4_file_get_access(fp, access);
	set_access(access, lock_stp);
}
static __be32
lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
			    struct nfs4_ol_stateid *ost,
			    struct nfsd4_lock *lock,
			    struct nfs4_ol_stateid **plst, bool *new)
{
	__be32 status;
	struct nfs4_file *fi = ost->st_stid.sc_file;
	struct nfs4_openowner *oo = openowner(ost->st_stateowner);
	struct nfs4_client *cl = oo->oo_owner.so_client;
	struct inode *inode = d_inode(cstate->current_fh.fh_dentry);
	struct nfs4_lockowner *lo;
	struct nfs4_ol_stateid *lst;
	unsigned int strhashval;

	lo = find_lockowner_str(cl, &lock->lk_new_owner);
	if (!lo) {
		strhashval = ownerstr_hashval(&lock->lk_new_owner);
		lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock);
		if (lo == NULL)
			return nfserr_jukebox;
	} else {
		/* with an existing lockowner, seqids must be the same */
		status = nfserr_bad_seqid;
		if (!cstate->minorversion &&
		    lock->lk_new_lock_seqid != lo->lo_owner.so_seqid)
			goto out;
	}

	lst = find_or_create_lock_stateid(lo, fi, inode, ost, new);
	if (lst == NULL) {
		status = nfserr_jukebox;
		goto out;
	}

	*plst = lst;
	status = nfs_ok;
out:
	nfs4_put_stateowner(&lo->lo_owner);
	return status;
}
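/*
 * LOCK itself: for a new lockowner (lk_is_new) the open stateid is
 * validated and a lock stateid is created or found; otherwise the
 * existing lock stateid is looked up by its seqid.  On a conflict,
 * sessions-based clients get the request queued (FL_SLEEP) and are later
 * notified through the nbl_cb callback when the lock becomes free.
 */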
__be32
nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	   union nfsd4_op_u *u)
{
	struct nfsd4_lock *lock = &u->lock;
	struct nfs4_openowner *open_sop = NULL;
	struct nfs4_lockowner *lock_sop = NULL;
	struct nfs4_ol_stateid *lock_stp = NULL;
	struct nfs4_ol_stateid *open_stp = NULL;
	struct nfs4_file *fp;
	struct file *filp = NULL;
	struct nfsd4_blocked_lock *nbl = NULL;
	struct file_lock *file_lock = NULL;
	struct file_lock *conflock = NULL;
	__be32 status = 0;
	int lkflg;
	int err;
	bool new = false;
	unsigned char fl_type;
	unsigned int fl_flags = FL_POSIX;
	struct net *net = SVC_NET(rqstp);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
		(long long) lock->lk_offset,
		(long long) lock->lk_length);

	if (check_lock_length(lock->lk_offset, lock->lk_length))
		 return nfserr_inval;

	if ((status = fh_verify(rqstp, &cstate->current_fh,
				S_IFREG, NFSD_MAY_LOCK))) {
		dprintk("NFSD: nfsd4_lock: permission denied!\n");
		return status;
	}

	if (lock->lk_is_new) {
		if (nfsd4_has_session(cstate))
			/* See rfc 5661 18.10.3: given clientid is ignored: */
			memcpy(&lock->lk_new_clientid,
				&cstate->session->se_client->cl_clientid,
				sizeof(clientid_t));

		status = nfserr_stale_clientid;
		if (STALE_CLIENTID(&lock->lk_new_clientid, nn))
			goto out;

		/* validate and update open stateid and open seqid */
		status = nfs4_preprocess_confirmed_seqid_op(cstate,
				        lock->lk_new_open_seqid,
		                        &lock->lk_new_open_stateid,
					&open_stp, nn);
		if (status)
			goto out;
		mutex_unlock(&open_stp->st_mutex);
		open_sop = openowner(open_stp->st_stateowner);
		status = nfserr_bad_stateid;
		if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
						&lock->lk_new_clientid))
			goto out;
		status = lookup_or_create_lock_state(cstate, open_stp, lock,
							&lock_stp, &new);
	} else {
		status = nfs4_preprocess_seqid_op(cstate,
				       lock->lk_old_lock_seqid,
				       &lock->lk_old_lock_stateid,
				       NFS4_LOCK_STID, &lock_stp, nn);
	}
	if (status)
		goto out;
	lock_sop = lockowner(lock_stp->st_stateowner);

	lkflg = setlkflg(lock->lk_type);
	status = nfs4_check_openmode(lock_stp, lkflg);
	if (status)
		goto out;

	status = nfserr_grace;
	if (locks_in_grace(net) && !lock->lk_reclaim)
		goto out;
	status = nfserr_no_grace;
	if (!locks_in_grace(net) && lock->lk_reclaim)
		goto out;

	fp = lock_stp->st_stid.sc_file;
	switch (lock->lk_type) {
		case NFS4_READW_LT:
			if (nfsd4_has_session(cstate))
				fl_flags |= FL_SLEEP;
			/* Fallthrough */
		case NFS4_READ_LT:
			spin_lock(&fp->fi_lock);
			filp = find_readable_file_locked(fp);
			if (filp)
				get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ);
			spin_unlock(&fp->fi_lock);
			fl_type = F_RDLCK;
			break;
		case NFS4_WRITEW_LT:
			if (nfsd4_has_session(cstate))
				fl_flags |= FL_SLEEP;
			/* Fallthrough */
		case NFS4_WRITE_LT:
			spin_lock(&fp->fi_lock);
			filp = find_writeable_file_locked(fp);
			if (filp)
				get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE);
			spin_unlock(&fp->fi_lock);
			fl_type = F_WRLCK;
			break;
		default:
			status = nfserr_inval;
			goto out;
	}

	if (!filp) {
		status = nfserr_openmode;
		goto out;
	}

	nbl = find_or_allocate_block(lock_sop, &fp->fi_fhandle, nn);
	if (!nbl) {
		dprintk("NFSD: %s: unable to allocate block!\n", __func__);
		status = nfserr_jukebox;
		goto out;
	}

	file_lock = &nbl->nbl_lock;
	file_lock->fl_type = fl_type;
	file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(&lock_sop->lo_owner));
	file_lock->fl_pid = current->tgid;
	file_lock->fl_file = filp;
	file_lock->fl_flags = fl_flags;
	file_lock->fl_lmops = &nfsd_posix_mng_ops;
	file_lock->fl_start = lock->lk_offset;
	file_lock->fl_end = last_byte_offset(lock->lk_offset, lock->lk_length);
	nfs4_transform_lock_offset(file_lock);

	conflock = locks_alloc_lock();
	if (!conflock) {
		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
		status = nfserr_jukebox;
		goto out;
	}

	if (fl_flags & FL_SLEEP) {
		nbl->nbl_time = jiffies;
		spin_lock(&nn->blocked_locks_lock);
		list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked);
		list_add_tail(&nbl->nbl_lru, &nn->blocked_locks_lru);
		spin_unlock(&nn->blocked_locks_lock);
	}

	err = vfs_lock_file(filp, F_SETLK, file_lock, conflock);
	switch (err) {
	case 0: /* success! */
		nfs4_inc_and_copy_stateid(&lock->lk_resp_stateid, &lock_stp->st_stid);
		if (lock->lk_reclaim)
			nn->somebody_reclaimed = true;
		break;
	case FILE_LOCK_DEFERRED:
		nbl = NULL;
		/* Fallthrough */
	case -EAGAIN:		/* conflock holds conflicting lock */
		status = nfserr_denied;
		dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
		nfs4_set_lock_denied(conflock, &lock->lk_denied);
		break;
	case -EDEADLK:
		status = nfserr_deadlock;
		break;
	default:
		dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n",err);
		status = nfserrno(err);
		break;
	}
out:
	if (nbl) {
		/* dequeue it if we queued it before */
		if (fl_flags & FL_SLEEP) {
			spin_lock(&nn->blocked_locks_lock);
			list_del_init(&nbl->nbl_list);
			list_del_init(&nbl->nbl_lru);
			spin_unlock(&nn->blocked_locks_lock);
		}
		free_blocked_lock(nbl);
	}
	if (filp)
		fput(filp);
	if (lock_stp) {
		/* Bump seqid manually if the 4.0 replay owner is openowner */
		if (cstate->replay_owner &&
		    cstate->replay_owner != &lock_sop->lo_owner &&
		    seqid_mutating_err(ntohl(status)))
			lock_sop->lo_owner.so_seqid++;

		/*
		 * If this is a new, never-before-used stateid, and we are
		 * returning an error, then just go ahead and release it.
		 */
		if (status && new)
			release_lock_stateid(lock_stp);

		mutex_unlock(&lock_stp->st_mutex);

		nfs4_put_stid(&lock_stp->st_stid);
	}
	if (open_stp)
		nfs4_put_stid(&open_stp->st_stid);
	nfsd4_bump_seqid(cstate, status);
	if (conflock)
		locks_free_lock(conflock);
	return status;
}
/*
 * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
 * so we do a temporary open here just to get an open file to pass to
 * vfs_test_lock.  (Arguably perhaps test_lock should be done with an
 * inode operation.)
 */
static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
{
	struct file *file;
	__be32 err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_READ, &file);
	if (!err) {
		err = nfserrno(vfs_test_lock(file, lock));
		fput(file);
	}
	return err;
}
/*
 * LOCKT operation
 */
__be32
nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	    union nfsd4_op_u *u)
{
	struct nfsd4_lockt *lockt = &u->lockt;
	struct file_lock *file_lock = NULL;
	struct nfs4_lockowner *lo = NULL;
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	if (locks_in_grace(SVC_NET(rqstp)))
		return nfserr_grace;

	if (check_lock_length(lockt->lt_offset, lockt->lt_length))
		 return nfserr_inval;

	if (!nfsd4_has_session(cstate)) {
		status = lookup_clientid(&lockt->lt_clientid, cstate, nn);
		if (status)
			goto out;
	}

	if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
		goto out;

	file_lock = locks_alloc_lock();
	if (!file_lock) {
		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
		status = nfserr_jukebox;
		goto out;
	}

	switch (lockt->lt_type) {
		case NFS4_READ_LT:
		case NFS4_READW_LT:
			file_lock->fl_type = F_RDLCK;
			break;
		case NFS4_WRITE_LT:
		case NFS4_WRITEW_LT:
			file_lock->fl_type = F_WRLCK;
			break;
		default:
			dprintk("NFSD: nfs4_lockt: bad lock type!\n");
			status = nfserr_inval;
			goto out;
	}

	lo = find_lockowner_str(cstate->clp, &lockt->lt_owner);
	if (lo)
		file_lock->fl_owner = (fl_owner_t)lo;
	file_lock->fl_pid = current->tgid;
	file_lock->fl_flags = FL_POSIX;

	file_lock->fl_start = lockt->lt_offset;
	file_lock->fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length);

	nfs4_transform_lock_offset(file_lock);

	status = nfsd_test_lock(rqstp, &cstate->current_fh, file_lock);
	if (status)
		goto out;

	if (file_lock->fl_type != F_UNLCK) {
		status = nfserr_denied;
		nfs4_set_lock_denied(file_lock, &lockt->lt_denied);
	}
out:
	if (lo)
		nfs4_put_stateowner(&lo->lo_owner);
	if (file_lock)
		locks_free_lock(file_lock);
	return status;
}
__be32
nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	    union nfsd4_op_u *u)
{
	struct nfsd4_locku *locku = &u->locku;
	struct nfs4_ol_stateid *stp;
	struct file *filp = NULL;
	struct file_lock *file_lock = NULL;
	__be32 status;
	int err;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	dprintk("NFSD: nfsd4_locku: start=%Ld length=%Ld\n",
		(long long) locku->lu_offset,
		(long long) locku->lu_length);

	if (check_lock_length(locku->lu_offset, locku->lu_length))
		 return nfserr_inval;

	status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid,
					&locku->lu_stateid, NFS4_LOCK_STID,
					&stp, nn);
	if (status)
		goto out;
	filp = find_any_file(stp->st_stid.sc_file);
	if (!filp) {
		status = nfserr_lock_range;
		goto put_stateid;
	}
	file_lock = locks_alloc_lock();
	if (!file_lock) {
		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
		status = nfserr_jukebox;
		goto fput;
	}

	file_lock->fl_type = F_UNLCK;
	file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(stp->st_stateowner));
	file_lock->fl_pid = current->tgid;
	file_lock->fl_file = filp;
	file_lock->fl_flags = FL_POSIX;
	file_lock->fl_lmops = &nfsd_posix_mng_ops;
	file_lock->fl_start = locku->lu_offset;

	file_lock->fl_end = last_byte_offset(locku->lu_offset,
						locku->lu_length);
	nfs4_transform_lock_offset(file_lock);

	err = vfs_lock_file(filp, F_SETLK, file_lock, NULL);
	if (err) {
		dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n");
		goto out_nfserr;
	}
	nfs4_inc_and_copy_stateid(&locku->lu_stateid, &stp->st_stid);
fput:
	fput(filp);
put_stateid:
	mutex_unlock(&stp->st_mutex);
	nfs4_put_stid(&stp->st_stid);
out:
	nfsd4_bump_seqid(cstate, status);
	if (file_lock)
		locks_free_lock(file_lock);
	return status;

out_nfserr:
	status = nfserrno(err);
	goto fput;
}
/*
 * returns
 * 	true:  locks held by lockowner
 * 	false: no locks held by lockowner
 */
static bool
check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
{
	struct file_lock *fl;
	int status = false;
	struct file *filp = find_any_file(fp);
	struct inode *inode;
	struct file_lock_context *flctx;

	if (!filp) {
		/* Any valid lock stateid should have some sort of access */
		WARN_ON_ONCE(1);
		return status;
	}

	inode = locks_inode(filp);
	flctx = inode->i_flctx;

	if (flctx && !list_empty_careful(&flctx->flc_posix)) {
		spin_lock(&flctx->flc_lock);
		list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
			if (fl->fl_owner == (fl_owner_t)lowner) {
				status = true;
				break;
			}
		}
		spin_unlock(&flctx->flc_lock);
	}
	fput(filp);
	return status;
}
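/*
 * RELEASE_LOCKOWNER (NFSv4.0 only): fails with nfserr_locks_held if the
 * owner still holds any byte-range lock on any file; otherwise every lock
 * stateid belonging to the owner is unhashed and the owner itself is
 * released.
 */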
__be32
nfsd4_release_lockowner(struct svc_rqst *rqstp,
			struct nfsd4_compound_state *cstate,
			union nfsd4_op_u *u)
{
	struct nfsd4_release_lockowner *rlockowner = &u->release_lockowner;
	clientid_t *clid = &rlockowner->rl_clientid;
	struct nfs4_stateowner *sop;
	struct nfs4_lockowner *lo = NULL;
	struct nfs4_ol_stateid *stp;
	struct xdr_netobj *owner = &rlockowner->rl_owner;
	unsigned int hashval = ownerstr_hashval(owner);
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
	struct nfs4_client *clp;
	LIST_HEAD (reaplist);

	dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
		clid->cl_boot, clid->cl_id);

	status = lookup_clientid(clid, cstate, nn);
	if (status)
		return status;

	clp = cstate->clp;
	/* Find the matching lock stateowner */
	spin_lock(&clp->cl_lock);
	list_for_each_entry(sop, &clp->cl_ownerstr_hashtbl[hashval],
			    so_strhash) {

		if (sop->so_is_open_owner || !same_owner_str(sop, owner))
			continue;

		/* see if there are still any locks associated with it */
		lo = lockowner(sop);
		list_for_each_entry(stp, &sop->so_stateids, st_perstateowner) {
			if (check_for_locks(stp->st_stid.sc_file, lo)) {
				status = nfserr_locks_held;
				spin_unlock(&clp->cl_lock);
				return status;
			}
		}

		nfs4_get_stateowner(sop);
		break;
	}
	if (!lo) {
		spin_unlock(&clp->cl_lock);
		return status;
	}

	unhash_lockowner_locked(lo);
	while (!list_empty(&lo->lo_owner.so_stateids)) {
		stp = list_first_entry(&lo->lo_owner.so_stateids,
				       struct nfs4_ol_stateid,
				       st_perstateowner);
		WARN_ON(!unhash_lock_stateid(stp));
		put_ol_stateid_locked(stp, &reaplist);
	}
	spin_unlock(&clp->cl_lock);
	free_ol_stateid_reaplist(&reaplist);
	remove_blocked_locks(lo);
	nfs4_put_stateowner(&lo->lo_owner);

	return status;
}
static inline struct nfs4_client_reclaim *
alloc_reclaim(void)
{
	return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL);
}

bool
nfs4_has_reclaimed_state(const char *name, struct nfsd_net *nn)
{
	struct nfs4_client_reclaim *crp;

	crp = nfsd4_find_reclaim_client(name, nn);
	return (crp && crp->cr_clp);
}
/*
 * failure => all reset bets are off, nfserr_no_grace...
 */
struct nfs4_client_reclaim *
nfs4_client_to_reclaim(const char *name, struct nfsd_net *nn)
{
	unsigned int strhashval;
	struct nfs4_client_reclaim *crp;

	dprintk("NFSD nfs4_client_to_reclaim NAME: %.*s\n", HEXDIR_LEN, name);
	crp = alloc_reclaim();
	if (crp) {
		strhashval = clientstr_hashval(name);
		INIT_LIST_HEAD(&crp->cr_strhash);
		list_add(&crp->cr_strhash, &nn->reclaim_str_hashtbl[strhashval]);
		memcpy(crp->cr_recdir, name, HEXDIR_LEN);
		crp->cr_clp = NULL;
		nn->reclaim_str_hashtbl_size++;
	}
	return crp;
}

void
nfs4_remove_reclaim_record(struct nfs4_client_reclaim *crp, struct nfsd_net *nn)
{
	list_del(&crp->cr_strhash);
	kfree(crp);
	nn->reclaim_str_hashtbl_size--;
}
*nn
)
6460 struct nfs4_client_reclaim
*crp
= NULL
;
6463 for (i
= 0; i
< CLIENT_HASH_SIZE
; i
++) {
6464 while (!list_empty(&nn
->reclaim_str_hashtbl
[i
])) {
6465 crp
= list_entry(nn
->reclaim_str_hashtbl
[i
].next
,
6466 struct nfs4_client_reclaim
, cr_strhash
);
6467 nfs4_remove_reclaim_record(crp
, nn
);
6470 WARN_ON_ONCE(nn
->reclaim_str_hashtbl_size
);
/*
 * called from OPEN, CLAIM_PREVIOUS with a new clientid. */
struct nfs4_client_reclaim *
nfsd4_find_reclaim_client(const char *recdir, struct nfsd_net *nn)
{
	unsigned int strhashval;
	struct nfs4_client_reclaim *crp = NULL;

	dprintk("NFSD: nfs4_find_reclaim_client for recdir %s\n", recdir);

	strhashval = clientstr_hashval(recdir);
	list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) {
		if (same_name(crp->cr_recdir, recdir)) {
			return crp;
		}
	}
	return NULL;
}
/*
 * Called from OPEN. Look for clientid in reclaim list.
 */
__be32
nfs4_check_open_reclaim(clientid_t *clid,
		struct nfsd4_compound_state *cstate,
		struct nfsd_net *nn)
{
	__be32 status;

	/* find clientid in conf_id_hashtbl */
	status = lookup_clientid(clid, cstate, nn);
	if (status)
		return nfserr_reclaim_bad;

	if (test_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &cstate->clp->cl_flags))
		return nfserr_no_grace;

	if (nfsd4_client_record_check(cstate->clp))
		return nfserr_reclaim_bad;

	return nfs_ok;
}
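/*
 * Everything below is fault-injection plumbing, compiled only with
 * CONFIG_NFSD_FAULT_INJECTION: debugging hooks that let tests print or
 * forcibly expire clients, locks, openowners and delegations.
 */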
#ifdef CONFIG_NFSD_FAULT_INJECTION
static inline void
put_client(struct nfs4_client *clp)
{
	atomic_dec(&clp->cl_refcount);
}

static struct nfs4_client *
nfsd_find_client(struct sockaddr_storage *addr, size_t addr_size)
{
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
					  nfsd_net_id);

	if (!nfsd_netns_ready(nn))
		return NULL;

	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
		if (memcmp(&clp->cl_addr, addr, addr_size) == 0)
			return clp;
	}
	return NULL;
}
u64
nfsd_inject_print_clients(void)
{
	struct nfs4_client *clp;
	u64 count = 0;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
					  nfsd_net_id);
	char buf[INET6_ADDRSTRLEN];

	if (!nfsd_netns_ready(nn))
		return 0;

	spin_lock(&nn->client_lock);
	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
		rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf));
		pr_info("NFS Client: %s\n", buf);
		++count;
	}
	spin_unlock(&nn->client_lock);

	return count;
}
u64
nfsd_inject_forget_client(struct sockaddr_storage *addr, size_t addr_size)
{
	u64 count = 0;
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
					  nfsd_net_id);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	clp = nfsd_find_client(addr, addr_size);
	if (clp) {
		if (mark_client_expired_locked(clp) == nfs_ok)
			++count;
		else
			clp = NULL;
	}
	spin_unlock(&nn->client_lock);

	if (clp)
		expire_client(clp);

	return count;
}
u64
nfsd_inject_forget_clients(u64 max)
{
	u64 count = 0;
	struct nfs4_client *clp, *next;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) {
		if (mark_client_expired_locked(clp) == nfs_ok) {
			list_add(&clp->cl_lru, &reaplist);
			if (max != 0 && ++count >= max)
				break;
		}
	}
	spin_unlock(&nn->client_lock);

	list_for_each_entry_safe(clp, next, &reaplist, cl_lru)
		expire_client(clp);

	return count;
}
static void nfsd_print_count(struct nfs4_client *clp, unsigned int count,
			     const char *type)
{
	char buf[INET6_ADDRSTRLEN];
	rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf));
	printk(KERN_INFO "NFS Client: %s has %u %s\n", buf, count, type);
}
static void
nfsd_inject_add_lock_to_list(struct nfs4_ol_stateid *lst,
			     struct list_head *collect)
{
	struct nfs4_client *clp = lst->st_stid.sc_client;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
					  nfsd_net_id);

	if (!collect)
		return;

	lockdep_assert_held(&nn->client_lock);
	atomic_inc(&clp->cl_refcount);
	list_add(&lst->st_locks, collect);
}
static u64 nfsd_foreach_client_lock(struct nfs4_client *clp, u64 max,
				    struct list_head *collect,
				    bool (*func)(struct nfs4_ol_stateid *))
{
	struct nfs4_openowner *oop;
	struct nfs4_ol_stateid *stp, *st_next;
	struct nfs4_ol_stateid *lst, *lst_next;
	u64 count = 0;

	spin_lock(&clp->cl_lock);
	list_for_each_entry(oop, &clp->cl_openowners, oo_perclient) {
		list_for_each_entry_safe(stp, st_next,
				&oop->oo_owner.so_stateids, st_perstateowner) {
			list_for_each_entry_safe(lst, lst_next,
					&stp->st_locks, st_locks) {
				if (func) {
					if (func(lst))
						nfsd_inject_add_lock_to_list(lst,
									collect);
				}
				++count;
				/*
				 * Despite the fact that these functions deal
				 * with 64-bit integers for "count", we must
				 * ensure that it doesn't blow up the
				 * clp->cl_refcount. Throw a warning if we
				 * start to approach INT_MAX here.
				 */
				WARN_ON_ONCE(count == (INT_MAX / 2));
				if (count == max)
					goto out;
			}
		}
	}
out:
	spin_unlock(&clp->cl_lock);

	return count;
}
static u64
nfsd_collect_client_locks(struct nfs4_client *clp, struct list_head *collect,
			  u64 max)
{
	return nfsd_foreach_client_lock(clp, max, collect, unhash_lock_stateid);
}

static u64
nfsd_print_client_locks(struct nfs4_client *clp)
{
	u64 count = nfsd_foreach_client_lock(clp, 0, NULL, NULL);
	nfsd_print_count(clp, count, "locked files");
	return count;
}
u64
nfsd_inject_print_locks(void)
{
	struct nfs4_client *clp;
	u64 count = 0;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);

	if (!nfsd_netns_ready(nn))
		return 0;

	spin_lock(&nn->client_lock);
	list_for_each_entry(clp, &nn->client_lru, cl_lru)
		count += nfsd_print_client_locks(clp);
	spin_unlock(&nn->client_lock);

	return count;
}
static void
nfsd_reap_locks(struct list_head *reaplist)
{
	struct nfs4_client *clp;
	struct nfs4_ol_stateid *stp, *next;

	list_for_each_entry_safe(stp, next, reaplist, st_locks) {
		list_del_init(&stp->st_locks);
		clp = stp->st_stid.sc_client;
		nfs4_put_stid(&stp->st_stid);
		put_client(clp);
	}
}

u64
nfsd_inject_forget_client_locks(struct sockaddr_storage *addr, size_t addr_size)
{
	unsigned int count = 0;
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
					  nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	clp = nfsd_find_client(addr, addr_size);
	if (clp)
		count = nfsd_collect_client_locks(clp, &reaplist, 0);
	spin_unlock(&nn->client_lock);
	nfsd_reap_locks(&reaplist);
	return count;
}

u64
nfsd_inject_forget_locks(u64 max)
{
	u64 count = 0;
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
					  nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
		count += nfsd_collect_client_locks(clp, &reaplist, max - count);
		if (max != 0 && count >= max)
			break;
	}
	spin_unlock(&nn->client_lock);
	nfsd_reap_locks(&reaplist);
	return count;
}

static u64
nfsd_foreach_client_openowner(struct nfs4_client *clp, u64 max,
			      struct list_head *collect,
			      void (*func)(struct nfs4_openowner *))
{
	struct nfs4_openowner *oop, *next;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
					  nfsd_net_id);
	u64 count = 0;

	lockdep_assert_held(&nn->client_lock);

	spin_lock(&clp->cl_lock);
	list_for_each_entry_safe(oop, next, &clp->cl_openowners, oo_perclient) {
		if (func) {
			func(oop);
			if (collect) {
				atomic_inc(&clp->cl_refcount);
				list_add(&oop->oo_perclient, collect);
			}
		}
		++count;
		/*
		 * Despite the fact that these functions deal with
		 * 64-bit integers for "count", we must ensure that
		 * it doesn't blow up the clp->cl_refcount. Throw a
		 * warning if we start to approach INT_MAX here.
		 */
		WARN_ON_ONCE(count == (INT_MAX / 2));
		if (count == max)
			break;
	}
	spin_unlock(&clp->cl_lock);

	return count;
}

static u64
nfsd_print_client_openowners(struct nfs4_client *clp)
{
	u64 count = nfsd_foreach_client_openowner(clp, 0, NULL, NULL);

	nfsd_print_count(clp, count, "openowners");
	return count;
}

static u64
nfsd_collect_client_openowners(struct nfs4_client *clp,
			       struct list_head *collect, u64 max)
{
	return nfsd_foreach_client_openowner(clp, max, collect,
					     unhash_openowner_locked);
}

u64
nfsd_inject_print_openowners(void)
{
	struct nfs4_client *clp;
	u64 count = 0;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
					  nfsd_net_id);

	if (!nfsd_netns_ready(nn))
		return 0;

	spin_lock(&nn->client_lock);
	list_for_each_entry(clp, &nn->client_lru, cl_lru)
		count += nfsd_print_client_openowners(clp);
	spin_unlock(&nn->client_lock);

	return count;
}

static void
nfsd_reap_openowners(struct list_head *reaplist)
{
	struct nfs4_client *clp;
	struct nfs4_openowner *oop, *next;

	list_for_each_entry_safe(oop, next, reaplist, oo_perclient) {
		list_del_init(&oop->oo_perclient);
		clp = oop->oo_owner.so_client;
		release_openowner(oop);
		put_client(clp);
	}
}

u64
nfsd_inject_forget_client_openowners(struct sockaddr_storage *addr,
				     size_t addr_size)
{
	unsigned int count = 0;
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
					  nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	clp = nfsd_find_client(addr, addr_size);
	if (clp)
		count = nfsd_collect_client_openowners(clp, &reaplist, 0);
	spin_unlock(&nn->client_lock);
	nfsd_reap_openowners(&reaplist);
	return count;
}

u64
nfsd_inject_forget_openowners(u64 max)
{
	u64 count = 0;
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
					  nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
		count += nfsd_collect_client_openowners(clp, &reaplist,
							max - count);
		if (max != 0 && count >= max)
			break;
	}
	spin_unlock(&nn->client_lock);
	nfsd_reap_openowners(&reaplist);
	return count;
}

static u64
nfsd_find_all_delegations(struct nfs4_client *clp, u64 max,
			  struct list_head *victims)
{
	struct nfs4_delegation *dp, *next;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
					  nfsd_net_id);
	u64 count = 0;

	lockdep_assert_held(&nn->client_lock);

	spin_lock(&state_lock);
	list_for_each_entry_safe(dp, next, &clp->cl_delegations, dl_perclnt) {
		if (victims) {
			/*
			 * It's not safe to mess with delegations that have a
			 * non-zero dl_time. They might have already been broken
			 * and could be processed by the laundromat outside of
			 * the state_lock. Just leave them be.
			 */
			if (dp->dl_time != 0)
				continue;

			atomic_inc(&clp->cl_refcount);
			WARN_ON(!unhash_delegation_locked(dp));
			list_add(&dp->dl_recall_lru, victims);
		}
		++count;
		/*
		 * Despite the fact that these functions deal with
		 * 64-bit integers for "count", we must ensure that
		 * it doesn't blow up the clp->cl_refcount. Throw a
		 * warning if we start to approach INT_MAX here.
		 */
		WARN_ON_ONCE(count == (INT_MAX / 2));
		if (count == max)
			break;
	}
	spin_unlock(&state_lock);
	return count;
}
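
/*
 * Editorial sketch (not part of the original source) of the race that the
 * dl_time check above sidesteps:
 *
 *	fault injection			laundromat
 *	---------------			----------
 *	spin_lock(&state_lock)
 *	sees dp with dl_time != 0:
 *	the delegation was already
 *	broken and queued for recall,	... and may be walked and freed
 *	spin_unlock(&state_lock)	outside of the state_lock
 *
 * Unhashing such an entry here could hand the same delegation to two
 * different reapers, so anything with a non-zero dl_time is skipped.
 */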

static u64
nfsd_print_client_delegations(struct nfs4_client *clp)
{
	u64 count = nfsd_find_all_delegations(clp, 0, NULL);

	nfsd_print_count(clp, count, "delegations");
	return count;
}

u64
nfsd_inject_print_delegations(void)
{
	struct nfs4_client *clp;
	u64 count = 0;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
					  nfsd_net_id);

	if (!nfsd_netns_ready(nn))
		return 0;

	spin_lock(&nn->client_lock);
	list_for_each_entry(clp, &nn->client_lru, cl_lru)
		count += nfsd_print_client_delegations(clp);
	spin_unlock(&nn->client_lock);

	return count;
}

static void
nfsd_forget_delegations(struct list_head *reaplist)
{
	struct nfs4_client *clp;
	struct nfs4_delegation *dp, *next;

	list_for_each_entry_safe(dp, next, reaplist, dl_recall_lru) {
		list_del_init(&dp->dl_recall_lru);
		clp = dp->dl_stid.sc_client;
		revoke_delegation(dp);
		put_client(clp);
	}
}

u64
nfsd_inject_forget_client_delegations(struct sockaddr_storage *addr,
				      size_t addr_size)
{
	u64 count = 0;
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
					  nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	clp = nfsd_find_client(addr, addr_size);
	if (clp)
		count = nfsd_find_all_delegations(clp, 0, &reaplist);
	spin_unlock(&nn->client_lock);

	nfsd_forget_delegations(&reaplist);
	return count;
}

u64
nfsd_inject_forget_delegations(u64 max)
{
	u64 count = 0;
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
					  nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
		count += nfsd_find_all_delegations(clp, max - count, &reaplist);
		if (max != 0 && count >= max)
			break;
	}
	spin_unlock(&nn->client_lock);
	nfsd_forget_delegations(&reaplist);
	return count;
}

static void
nfsd_recall_delegations(struct list_head *reaplist)
{
	struct nfs4_client *clp;
	struct nfs4_delegation *dp, *next;

	list_for_each_entry_safe(dp, next, reaplist, dl_recall_lru) {
		list_del_init(&dp->dl_recall_lru);
		clp = dp->dl_stid.sc_client;
		/*
		 * We skipped all entries that had a non-zero dl_time when
		 * collecting, so everything on this list arrived here with
		 * dl_time == 0, and we can safely reset dl_time back to 0.
		 * If a delegation break comes in now, then it won't make any
		 * difference since we're recalling it either way.
		 */
		spin_lock(&state_lock);
		dp->dl_time = 0;
		spin_unlock(&state_lock);
		nfsd_break_one_deleg(dp);
		put_client(clp);
	}
}

u64
nfsd_inject_recall_client_delegations(struct sockaddr_storage *addr,
				      size_t addr_size)
{
	u64 count = 0;
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
					  nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	clp = nfsd_find_client(addr, addr_size);
	if (clp)
		count = nfsd_find_all_delegations(clp, 0, &reaplist);
	spin_unlock(&nn->client_lock);

	nfsd_recall_delegations(&reaplist);
	return count;
}

u64
nfsd_inject_recall_delegations(u64 max)
{
	u64 count = 0;
	struct nfs4_client *clp, *next;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
					  nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) {
		count += nfsd_find_all_delegations(clp, max - count, &reaplist);
		/*
		 * count was already updated above; don't increment it a
		 * second time when checking against the cap.
		 */
		if (max != 0 && count >= max)
			break;
	}
	spin_unlock(&nn->client_lock);
	nfsd_recall_delegations(&reaplist);
	return count;
}

#endif /* CONFIG_NFSD_FAULT_INJECTION */
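
/*
 * Editorial note (hedged, not part of the original source): in kernels of
 * this vintage the nfsd_inject_* entry points above are wired up to debugfs
 * control files by fs/nfsd/fault_inject.c. Illustrative usage only -- file
 * names and paths may differ by kernel version:
 *
 *	# forget up to two clients:
 *	echo 2 > /sys/kernel/debug/nfsd/forget_clients
 *	# read back a count without modifying any state:
 *	cat /sys/kernel/debug/nfsd/forget_locks
 */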

/*
 * Since the lifetime of a delegation isn't limited to that of an open, a
 * client may quite reasonably hang on to a delegation as long as it has
 * the inode cached. This becomes an obvious problem the first time a
 * client's inode cache approaches the size of the server's total memory.
 *
 * For now we avoid this problem by imposing a hard limit on the number
 * of delegations, which varies according to the server's memory size.
 */
static void
set_max_delegations(void)
{
	/*
	 * Allow at most 4 delegations per megabyte of RAM. Quick
	 * estimates suggest that in the worst case (where every delegation
	 * is for a different inode), a delegation could take about 1.5K,
	 * giving a worst-case usage of about 6K per megabyte, i.e. roughly
	 * 0.6% of memory.
	 */
	max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT);
}
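
/*
 * Worked example of the shift above (editorial, not part of the original
 * source): the target is delegations = 4 * megabytes of RAM, i.e.
 *
 *	((pages << PAGE_SHIFT) >> 20) << 2  ==  pages >> (20 - 2 - PAGE_SHIFT)
 *
 * With 4K pages (PAGE_SHIFT == 12) that is pages >> 6; for example, 1 GiB
 * of free buffer pages (262144 pages) allows 262144 >> 6 == 4096
 * delegations, which at ~1.5K apiece comes to ~6 MiB, about 0.6% of that
 * memory.
 */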

static int nfs4_state_create_net(struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	int i;

	nn->conf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE,
					    sizeof(struct list_head),
					    GFP_KERNEL);
	if (!nn->conf_id_hashtbl)
		goto err;
	nn->unconf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE,
					      sizeof(struct list_head),
					      GFP_KERNEL);
	if (!nn->unconf_id_hashtbl)
		goto err_unconf_id;
	nn->sessionid_hashtbl = kmalloc_array(SESSION_HASH_SIZE,
					      sizeof(struct list_head),
					      GFP_KERNEL);
	if (!nn->sessionid_hashtbl)
		goto err_sessionid;

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]);
		INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]);
	}
	for (i = 0; i < SESSION_HASH_SIZE; i++)
		INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]);
	nn->conf_name_tree = RB_ROOT;
	nn->unconf_name_tree = RB_ROOT;
	nn->boot_time = get_seconds();
	nn->grace_ended = false;
	nn->nfsd4_manager.block_opens = true;
	INIT_LIST_HEAD(&nn->nfsd4_manager.list);
	INIT_LIST_HEAD(&nn->client_lru);
	INIT_LIST_HEAD(&nn->close_lru);
	INIT_LIST_HEAD(&nn->del_recall_lru);
	spin_lock_init(&nn->client_lock);

	spin_lock_init(&nn->blocked_locks_lock);
	INIT_LIST_HEAD(&nn->blocked_locks_lru);

	INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main);
	get_net(net);

	return 0;

err_sessionid:
	kfree(nn->unconf_id_hashtbl);
err_unconf_id:
	kfree(nn->conf_id_hashtbl);
err:
	return -ENOMEM;
}

static void
nfs4_state_destroy_net(struct net *net)
{
	int i;
	struct nfs4_client *clp = NULL;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		while (!list_empty(&nn->conf_id_hashtbl[i])) {
			clp = list_entry(nn->conf_id_hashtbl[i].next,
					 struct nfs4_client, cl_idhash);
			destroy_client(clp);
		}
	}

	WARN_ON(!list_empty(&nn->blocked_locks_lru));

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		while (!list_empty(&nn->unconf_id_hashtbl[i])) {
			clp = list_entry(nn->unconf_id_hashtbl[i].next,
					 struct nfs4_client, cl_idhash);
			destroy_client(clp);
		}
	}

	kfree(nn->sessionid_hashtbl);
	kfree(nn->unconf_id_hashtbl);
	kfree(nn->conf_id_hashtbl);
	put_net(net);
}

int
nfs4_state_start_net(struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	int ret;

	ret = nfs4_state_create_net(net);
	if (ret)
		return ret;
	locks_start_grace(net, &nn->nfsd4_manager);
	nfsd4_client_tracking_init(net);
	printk(KERN_INFO "NFSD: starting %ld-second grace period (net %x)\n",
	       nn->nfsd4_grace, net->ns.inum);
	queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ);
	return 0;
}

/* initialization to perform when the nfsd service is started: */

int
nfs4_state_start(void)
{
	int ret;

	laundry_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "nfsd4");
	if (laundry_wq == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	ret = nfsd4_create_callback_queue();
	if (ret)
		goto out_free_laundry;

	set_max_delegations();
	return 0;

out_free_laundry:
	destroy_workqueue(laundry_wq);
out:
	return ret;
}

void
nfs4_state_shutdown_net(struct net *net)
{
	struct nfs4_delegation *dp = NULL;
	struct list_head *pos, *next, reaplist;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	cancel_delayed_work_sync(&nn->laundromat_work);
	locks_end_grace(&nn->nfsd4_manager);

	INIT_LIST_HEAD(&reaplist);
	spin_lock(&state_lock);
	list_for_each_safe(pos, next, &nn->del_recall_lru) {
		dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
		WARN_ON(!unhash_delegation_locked(dp));
		list_add(&dp->dl_recall_lru, &reaplist);
	}
	spin_unlock(&state_lock);
	list_for_each_safe(pos, next, &reaplist) {
		dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
		list_del_init(&dp->dl_recall_lru);
		destroy_unhashed_deleg(dp);
	}

	nfsd4_client_tracking_exit(net);
	nfs4_state_destroy_net(net);
}

void
nfs4_state_shutdown(void)
{
	destroy_workqueue(laundry_wq);
	nfsd4_destroy_callback_queue();
}

static void
get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
{
	if (HAS_STATE_ID(cstate, CURRENT_STATE_ID_FLAG) &&
	    CURRENT_STATEID(stateid))
		memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t));
}

static void
put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
{
	if (cstate->minorversion) {
		memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t));
		SET_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
	}
}

void
clear_current_stateid(struct nfsd4_compound_state *cstate)
{
	CLEAR_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
}
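
/*
 * Editorial example (not part of the original source): the helpers above
 * and below implement the NFSv4.1 "current stateid" mechanism. In a
 * COMPOUND such as
 *
 *	SEQUENCE; PUTFH; OPEN; READ(<current stateid>);
 *
 * the OPEN handler calls nfsd4_set_openstateid() -> put_stateid(), saving
 * op_stateid in cstate->current_stateid. When READ then carries the special
 * current-stateid value, nfsd4_get_readstateid() -> get_stateid()
 * substitutes the saved stateid, so the client can chain the two operations
 * without waiting to see the OPEN result.
 */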

/*
 * functions to set current state id
 */
void
nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	put_stateid(cstate, &u->open_downgrade.od_stateid);
}

void
nfsd4_set_openstateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	put_stateid(cstate, &u->open.op_stateid);
}

void
nfsd4_set_closestateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	put_stateid(cstate, &u->close.cl_stateid);
}

void
nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	put_stateid(cstate, &u->lock.lk_resp_stateid);
}

/*
 * functions to consume current state id
 */
void
nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->open_downgrade.od_stateid);
}

void
nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->delegreturn.dr_stateid);
}

void
nfsd4_get_freestateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->free_stateid.fr_stateid);
}

void
nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->setattr.sa_stateid);
}

void
nfsd4_get_closestateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->close.cl_stateid);
}

void
nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->locku.lu_stateid);
}

void
nfsd4_get_readstateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->read.rd_stateid);
}

void
nfsd4_get_writestateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->write.wr_stateid);
}