/*
 * Copyright (c) 2001 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Kendrick Smith <kmsmith@umich.edu>
 * Andy Adamson <kandros@umich.edu>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/ratelimit.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/addr.h>
#include <linux/jhash.h>
#include "xdr4.h"
#include "xdr4cb.h"
#include "vfs.h"
#include "current_stateid.h"

#include "netns.h"
#include "pnfs.h"

#define NFSDDBG_FACILITY                NFSDDBG_PROC
#define all_ones {{~0,~0},~0}
static const stateid_t one_stateid = {
	.si_generation = ~0,
	.si_opaque = all_ones,
};
static const stateid_t zero_stateid = {
	/* all fields zeroed */
};
static const stateid_t currentstateid = {
	.si_generation = 1,
};
static const stateid_t close_stateid = {
	.si_generation = 0xffffffffU,
};

static u64 current_sessionid = 1;

#define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
#define ONE_STATEID(stateid) (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
#define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
#define CLOSE_STATEID(stateid)  (!memcmp((stateid), &close_stateid, sizeof(stateid_t)))
/* forward declarations */
static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
static void nfs4_free_ol_stateid(struct nfs4_stid *stid);

/* Locking: */

/*
 * Currently used for the del_recall_lru and file hash table.  In an
 * effort to decrease the scope of the client_mutex, this spinlock may
 * eventually cover more:
 */
static DEFINE_SPINLOCK(state_lock);

/*
 * A waitqueue for all in-progress 4.0 CLOSE operations that are waiting for
 * the refcount on the open stateid to drop.
 */
static DECLARE_WAIT_QUEUE_HEAD(close_wq);

static struct kmem_cache *openowner_slab;
static struct kmem_cache *lockowner_slab;
static struct kmem_cache *file_slab;
static struct kmem_cache *stateid_slab;
static struct kmem_cache *deleg_slab;
static struct kmem_cache *odstate_slab;

static void free_session(struct nfsd4_session *);

static struct nfsd4_callback_ops nfsd4_cb_recall_ops;
static bool is_session_dead(struct nfsd4_session *ses)
{
	return ses->se_flags & NFS4_SESSION_DEAD;
}

static __be32 mark_session_dead_locked(struct nfsd4_session *ses, int ref_held_by_me)
{
	if (atomic_read(&ses->se_ref) > ref_held_by_me)
		return nfserr_jukebox;
	ses->se_flags |= NFS4_SESSION_DEAD;
	return nfs_ok;
}

static bool is_client_expired(struct nfs4_client *clp)
{
	return clp->cl_time == 0;
}

static __be32 get_client_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	if (is_client_expired(clp))
		return nfserr_expired;
	atomic_inc(&clp->cl_refcount);
	return nfs_ok;
}

/* must be called under the client_lock */
static void
renew_client_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	if (is_client_expired(clp)) {
		WARN_ON(1);
		printk("%s: client (clientid %08x/%08x) already expired\n",
			__func__,
			clp->cl_clientid.cl_boot,
			clp->cl_clientid.cl_id);
		return;
	}

	dprintk("renewing client (clientid %08x/%08x)\n",
			clp->cl_clientid.cl_boot,
			clp->cl_clientid.cl_id);
	list_move_tail(&clp->cl_lru, &nn->client_lru);
	clp->cl_time = get_seconds();
}

static void put_client_renew_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	if (!atomic_dec_and_test(&clp->cl_refcount))
		return;
	if (!is_client_expired(clp))
		renew_client_locked(clp);
}

static void put_client_renew(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	if (!atomic_dec_and_lock(&clp->cl_refcount, &nn->client_lock))
		return;
	if (!is_client_expired(clp))
		renew_client_locked(clp);
	spin_unlock(&nn->client_lock);
}

static __be32 nfsd4_get_session_locked(struct nfsd4_session *ses)
{
	__be32 status;

	if (is_session_dead(ses))
		return nfserr_badsession;
	status = get_client_locked(ses->se_client);
	if (status)
		return status;
	atomic_inc(&ses->se_ref);
	return nfs_ok;
}

static void nfsd4_put_session_locked(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	if (atomic_dec_and_test(&ses->se_ref) && is_session_dead(ses))
		free_session(ses);
	put_client_renew_locked(clp);
}

static void nfsd4_put_session(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	spin_lock(&nn->client_lock);
	nfsd4_put_session_locked(ses);
	spin_unlock(&nn->client_lock);
}
static inline struct nfs4_stateowner *
nfs4_get_stateowner(struct nfs4_stateowner *sop)
{
	atomic_inc(&sop->so_count);
	return sop;
}

static int
same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner)
{
	return (sop->so_owner.len == owner->len) &&
		0 == memcmp(sop->so_owner.data, owner->data, owner->len);
}

static struct nfs4_openowner *
find_openstateowner_str_locked(unsigned int hashval, struct nfsd4_open *open,
			struct nfs4_client *clp)
{
	struct nfs4_stateowner *so;

	lockdep_assert_held(&clp->cl_lock);

	list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[hashval],
			    so_strhash) {
		if (!so->so_is_open_owner)
			continue;
		if (same_owner_str(so, &open->op_owner))
			return openowner(nfs4_get_stateowner(so));
	}
	return NULL;
}

static struct nfs4_openowner *
find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open,
			struct nfs4_client *clp)
{
	struct nfs4_openowner *oo;

	spin_lock(&clp->cl_lock);
	oo = find_openstateowner_str_locked(hashval, open, clp);
	spin_unlock(&clp->cl_lock);
	return oo;
}
static unsigned int
opaque_hashval(const void *ptr, int nbytes)
{
	unsigned char *cptr = (unsigned char *) ptr;
	unsigned int x = 0;

	while (nbytes--) {
		x *= 37;
		x += *cptr++;
	}
	return x;
}
static void nfsd4_free_file_rcu(struct rcu_head *rcu)
{
	struct nfs4_file *fp = container_of(rcu, struct nfs4_file, fi_rcu);

	kmem_cache_free(file_slab, fp);
}

void
put_nfs4_file(struct nfs4_file *fi)
{
	might_lock(&state_lock);

	if (atomic_dec_and_lock(&fi->fi_ref, &state_lock)) {
		hlist_del_rcu(&fi->fi_hash);
		spin_unlock(&state_lock);
		WARN_ON_ONCE(!list_empty(&fi->fi_clnt_odstate));
		WARN_ON_ONCE(!list_empty(&fi->fi_delegations));
		call_rcu(&fi->fi_rcu, nfsd4_free_file_rcu);
	}
}
static struct file *
__nfs4_get_fd(struct nfs4_file *f, int oflag)
{
	if (f->fi_fds[oflag])
		return get_file(f->fi_fds[oflag]);
	return NULL;
}

static struct file *
find_writeable_file_locked(struct nfs4_file *f)
{
	struct file *ret;

	lockdep_assert_held(&f->fi_lock);

	ret = __nfs4_get_fd(f, O_WRONLY);
	if (!ret)
		ret = __nfs4_get_fd(f, O_RDWR);
	return ret;
}

static struct file *
find_writeable_file(struct nfs4_file *f)
{
	struct file *ret;

	spin_lock(&f->fi_lock);
	ret = find_writeable_file_locked(f);
	spin_unlock(&f->fi_lock);

	return ret;
}

static struct file *find_readable_file_locked(struct nfs4_file *f)
{
	struct file *ret;

	lockdep_assert_held(&f->fi_lock);

	ret = __nfs4_get_fd(f, O_RDONLY);
	if (!ret)
		ret = __nfs4_get_fd(f, O_RDWR);
	return ret;
}

static struct file *
find_readable_file(struct nfs4_file *f)
{
	struct file *ret;

	spin_lock(&f->fi_lock);
	ret = find_readable_file_locked(f);
	spin_unlock(&f->fi_lock);

	return ret;
}

struct file *
find_any_file(struct nfs4_file *f)
{
	struct file *ret;

	spin_lock(&f->fi_lock);
	ret = __nfs4_get_fd(f, O_RDWR);
	if (!ret) {
		ret = __nfs4_get_fd(f, O_WRONLY);
		if (!ret)
			ret = __nfs4_get_fd(f, O_RDONLY);
	}
	spin_unlock(&f->fi_lock);
	return ret;
}
static atomic_long_t num_delegations;
unsigned long max_delegations;

/*
 * Open owner state (share locks)
 */

/* hash tables for lock and open owners */
#define OWNER_HASH_BITS              8
#define OWNER_HASH_SIZE             (1 << OWNER_HASH_BITS)
#define OWNER_HASH_MASK             (OWNER_HASH_SIZE - 1)

static unsigned int ownerstr_hashval(struct xdr_netobj *ownername)
{
	unsigned int ret;

	ret = opaque_hashval(ownername->data, ownername->len);
	return ret & OWNER_HASH_MASK;
}

/* hash table for nfs4_file */
#define FILE_HASH_BITS                   8
#define FILE_HASH_SIZE                  (1 << FILE_HASH_BITS)

static unsigned int nfsd_fh_hashval(struct knfsd_fh *fh)
{
	return jhash2(fh->fh_base.fh_pad, XDR_QUADLEN(fh->fh_size), 0);
}

static unsigned int file_hashval(struct knfsd_fh *fh)
{
	return nfsd_fh_hashval(fh) & (FILE_HASH_SIZE - 1);
}

static struct hlist_head file_hashtbl[FILE_HASH_SIZE];
static void
__nfs4_file_get_access(struct nfs4_file *fp, u32 access)
{
	lockdep_assert_held(&fp->fi_lock);

	if (access & NFS4_SHARE_ACCESS_WRITE)
		atomic_inc(&fp->fi_access[O_WRONLY]);
	if (access & NFS4_SHARE_ACCESS_READ)
		atomic_inc(&fp->fi_access[O_RDONLY]);
}

static __be32
nfs4_file_get_access(struct nfs4_file *fp, u32 access)
{
	lockdep_assert_held(&fp->fi_lock);

	/* Does this access mode make sense? */
	if (access & ~NFS4_SHARE_ACCESS_BOTH)
		return nfserr_inval;

	/* Does it conflict with a deny mode already set? */
	if ((access & fp->fi_share_deny) != 0)
		return nfserr_share_denied;

	__nfs4_file_get_access(fp, access);
	return nfs_ok;
}

static __be32 nfs4_file_check_deny(struct nfs4_file *fp, u32 deny)
{
	/* Common case is that there is no deny mode. */
	if (deny) {
		/* Does this deny mode make sense? */
		if (deny & ~NFS4_SHARE_DENY_BOTH)
			return nfserr_inval;

		if ((deny & NFS4_SHARE_DENY_READ) &&
		    atomic_read(&fp->fi_access[O_RDONLY]))
			return nfserr_share_denied;

		if ((deny & NFS4_SHARE_DENY_WRITE) &&
		    atomic_read(&fp->fi_access[O_WRONLY]))
			return nfserr_share_denied;
	}
	return nfs_ok;
}

static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
{
	might_lock(&fp->fi_lock);

	if (atomic_dec_and_lock(&fp->fi_access[oflag], &fp->fi_lock)) {
		struct file *f1 = NULL;
		struct file *f2 = NULL;

		swap(f1, fp->fi_fds[oflag]);
		if (atomic_read(&fp->fi_access[1 - oflag]) == 0)
			swap(f2, fp->fi_fds[O_RDWR]);
		spin_unlock(&fp->fi_lock);
		if (f1)
			fput(f1);
		if (f2)
			fput(f2);
	}
}

static void nfs4_file_put_access(struct nfs4_file *fp, u32 access)
{
	WARN_ON_ONCE(access & ~NFS4_SHARE_ACCESS_BOTH);

	if (access & NFS4_SHARE_ACCESS_WRITE)
		__nfs4_file_put_access(fp, O_WRONLY);
	if (access & NFS4_SHARE_ACCESS_READ)
		__nfs4_file_put_access(fp, O_RDONLY);
}
/*
 * Allocate a new open/delegation state counter. This is needed for
 * pNFS for proper return on close semantics.
 *
 * Note that we only allocate it for pNFS-enabled exports, otherwise
 * all pointers to struct nfs4_clnt_odstate are always NULL.
 */
static struct nfs4_clnt_odstate *
alloc_clnt_odstate(struct nfs4_client *clp)
{
	struct nfs4_clnt_odstate *co;

	co = kmem_cache_zalloc(odstate_slab, GFP_KERNEL);
	if (co) {
		co->co_client = clp;
		atomic_set(&co->co_odcount, 1);
	}
	return co;
}

static void
hash_clnt_odstate_locked(struct nfs4_clnt_odstate *co)
{
	struct nfs4_file *fp = co->co_file;

	lockdep_assert_held(&fp->fi_lock);
	list_add(&co->co_perfile, &fp->fi_clnt_odstate);
}

static void
get_clnt_odstate(struct nfs4_clnt_odstate *co)
{
	if (co)
		atomic_inc(&co->co_odcount);
}

static void
put_clnt_odstate(struct nfs4_clnt_odstate *co)
{
	struct nfs4_file *fp;

	if (!co)
		return;

	fp = co->co_file;
	if (atomic_dec_and_lock(&co->co_odcount, &fp->fi_lock)) {
		list_del(&co->co_perfile);
		spin_unlock(&fp->fi_lock);

		nfsd4_return_all_file_layouts(co->co_client, fp);
		kmem_cache_free(odstate_slab, co);
	}
}

static struct nfs4_clnt_odstate *
find_or_hash_clnt_odstate(struct nfs4_file *fp, struct nfs4_clnt_odstate *new)
{
	struct nfs4_clnt_odstate *co;
	struct nfs4_client *cl;

	if (!new)
		return NULL;

	cl = new->co_client;

	spin_lock(&fp->fi_lock);
	list_for_each_entry(co, &fp->fi_clnt_odstate, co_perfile) {
		if (co->co_client == cl) {
			get_clnt_odstate(co);
			goto out;
		}
	}
	co = new;
	co->co_file = fp;
	hash_clnt_odstate_locked(new);
out:
	spin_unlock(&fp->fi_lock);
	return co;
}
struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
				  void (*sc_free)(struct nfs4_stid *))
{
	struct nfs4_stid *stid;
	int new_id;

	stid = kmem_cache_zalloc(slab, GFP_KERNEL);
	if (!stid)
		return NULL;

	idr_preload(GFP_KERNEL);
	spin_lock(&cl->cl_lock);
	new_id = idr_alloc_cyclic(&cl->cl_stateids, stid, 0, 0, GFP_NOWAIT);
	spin_unlock(&cl->cl_lock);
	idr_preload_end();
	if (new_id < 0)
		goto out_free;

	stid->sc_free = sc_free;
	stid->sc_client = cl;
	stid->sc_stateid.si_opaque.so_id = new_id;
	stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
	/* Will be incremented before return to client: */
	atomic_set(&stid->sc_count, 1);
	spin_lock_init(&stid->sc_lock);

	/*
	 * It shouldn't be a problem to reuse an opaque stateid value.
	 * I don't think it is for 4.1.  But with 4.0 I worry that, for
	 * example, a stray write retransmission could be accepted by
	 * the server when it should have been rejected.  Therefore,
	 * adopt a trick from the sctp code to attempt to maximize the
	 * amount of time until an id is reused, by ensuring they always
	 * "increase" (mod INT_MAX):
	 */
	return stid;
out_free:
	kmem_cache_free(slab, stid);
	return NULL;
}

static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp)
{
	struct nfs4_stid *stid;

	stid = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_ol_stateid);
	if (!stid)
		return NULL;

	return openlockstateid(stid);
}
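
/*
 * Editor's note -- an illustrative sketch, not part of the original file:
 * idr_alloc_cyclic() hands out ids in increasing order, only wrapping
 * once the id space is exhausted, so a freed so_id is not reissued until
 * on the order of 2^31 further allocations have happened:
 *
 *	first allocation:		so_id = 0
 *	second allocation:		so_id = 1
 *	free so_id 0, allocate again:	so_id = 2 (not 0)
 *
 * That is what the comment above means by ids always "increasing"
 * (mod INT_MAX): it maximizes the time before a stray NFSv4.0
 * retransmission can match a recycled stateid.
 */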
static void nfs4_free_deleg(struct nfs4_stid *stid)
{
	kmem_cache_free(deleg_slab, stid);
	atomic_long_dec(&num_delegations);
}

/*
 * When we recall a delegation, we should be careful not to hand it
 * out again straight away.
 * To ensure this we keep a pair of bloom filters ('new' and 'old')
 * in which the filehandles of recalled delegations are "stored".
 * If a filehandle appears in either filter, a delegation is blocked.
 * When a delegation is recalled, the filehandle is stored in the "new"
 * filter.
 * Every 30 seconds we swap the filters and clear the "new" one,
 * unless both are empty of course.
 *
 * Each filter is 256 bits.  We hash the filehandle to 32bit and use the
 * low 3 bytes as hash-table indices.
 *
 * 'blocked_delegations_lock', which is always taken in block_delegations(),
 * is used to manage concurrent access.  Testing does not need the lock
 * except when swapping the two filters.
 */
static DEFINE_SPINLOCK(blocked_delegations_lock);
static struct bloom_pair {
	int	entries, old_entries;
	time_t	swap_time;
	int	new; /* index into 'set' */
	DECLARE_BITMAP(set[2], 256);
} blocked_delegations;

static int delegation_blocked(struct knfsd_fh *fh)
{
	u32 hash;
	struct bloom_pair *bd = &blocked_delegations;

	if (bd->entries == 0)
		return 0;
	if (seconds_since_boot() - bd->swap_time > 30) {
		spin_lock(&blocked_delegations_lock);
		if (seconds_since_boot() - bd->swap_time > 30) {
			bd->entries -= bd->old_entries;
			bd->old_entries = bd->entries;
			memset(bd->set[bd->new], 0,
			       sizeof(bd->set[0]));
			bd->new = 1-bd->new;
			bd->swap_time = seconds_since_boot();
		}
		spin_unlock(&blocked_delegations_lock);
	}
	hash = jhash(&fh->fh_base, fh->fh_size, 0);
	if (test_bit(hash&255, bd->set[0]) &&
	    test_bit((hash>>8)&255, bd->set[0]) &&
	    test_bit((hash>>16)&255, bd->set[0]))
		return 1;

	if (test_bit(hash&255, bd->set[1]) &&
	    test_bit((hash>>8)&255, bd->set[1]) &&
	    test_bit((hash>>16)&255, bd->set[1]))
		return 1;

	return 0;
}

static void block_delegations(struct knfsd_fh *fh)
{
	u32 hash;
	struct bloom_pair *bd = &blocked_delegations;

	hash = jhash(&fh->fh_base, fh->fh_size, 0);

	spin_lock(&blocked_delegations_lock);
	__set_bit(hash&255, bd->set[bd->new]);
	__set_bit((hash>>8)&255, bd->set[bd->new]);
	__set_bit((hash>>16)&255, bd->set[bd->new]);
	if (bd->entries == 0)
		bd->swap_time = seconds_since_boot();
	bd->entries += 1;
	spin_unlock(&blocked_delegations_lock);
}
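
/*
 * Editor's note -- a worked example of the filter indexing above, with a
 * hypothetical hash value (not from the original file):
 *
 *	hash = jhash(&fh->fh_base, fh->fh_size, 0);	// say 0x00a1b2c3
 *	index 0: hash & 255		= 0xc3
 *	index 1: (hash >> 8) & 255	= 0xb2
 *	index 2: (hash >> 16) & 255	= 0xa1
 *
 * A delegation is blocked only when all three bits are set in one of the
 * two 256-bit filters, so a false positive merely delays a delegation,
 * while entries are only lost when aged out by the 30-second swap.
 */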
static struct nfs4_delegation *
alloc_init_deleg(struct nfs4_client *clp, struct svc_fh *current_fh,
		 struct nfs4_clnt_odstate *odstate)
{
	struct nfs4_delegation *dp;
	long n;

	dprintk("NFSD alloc_init_deleg\n");
	n = atomic_long_inc_return(&num_delegations);
	if (n < 0 || n > max_delegations)
		goto out_dec;
	if (delegation_blocked(&current_fh->fh_handle))
		goto out_dec;
	dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab, nfs4_free_deleg));
	if (dp == NULL)
		goto out_dec;

	/*
	 * delegation seqid's are never incremented.  The 4.1 special
	 * meaning of seqid 0 isn't meaningful, really, but let's avoid
	 * 0 anyway just for consistency and use 1:
	 */
	dp->dl_stid.sc_stateid.si_generation = 1;
	INIT_LIST_HEAD(&dp->dl_perfile);
	INIT_LIST_HEAD(&dp->dl_perclnt);
	INIT_LIST_HEAD(&dp->dl_recall_lru);
	dp->dl_clnt_odstate = odstate;
	get_clnt_odstate(odstate);
	dp->dl_type = NFS4_OPEN_DELEGATE_READ;
	dp->dl_retries = 1;
	nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client,
		      &nfsd4_cb_recall_ops, NFSPROC4_CLNT_CB_RECALL);
	return dp;
out_dec:
	atomic_long_dec(&num_delegations);
	return NULL;
}
void
nfs4_put_stid(struct nfs4_stid *s)
{
	struct nfs4_file *fp = s->sc_file;
	struct nfs4_client *clp = s->sc_client;

	might_lock(&clp->cl_lock);

	if (!atomic_dec_and_lock(&s->sc_count, &clp->cl_lock)) {
		wake_up_all(&close_wq);
		return;
	}
	idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
	spin_unlock(&clp->cl_lock);
	s->sc_free(s);
	if (fp)
		put_nfs4_file(fp);
}

void
nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid)
{
	stateid_t *src = &stid->sc_stateid;

	spin_lock(&stid->sc_lock);
	if (unlikely(++src->si_generation == 0))
		src->si_generation = 1;
	memcpy(dst, src, sizeof(*dst));
	spin_unlock(&stid->sc_lock);
}

static void nfs4_put_deleg_lease(struct nfs4_file *fp)
{
	struct file *filp = NULL;

	spin_lock(&fp->fi_lock);
	if (fp->fi_deleg_file && --fp->fi_delegees == 0)
		swap(filp, fp->fi_deleg_file);
	spin_unlock(&fp->fi_lock);

	if (filp) {
		vfs_setlease(filp, F_UNLCK, NULL, (void **)&fp);
		fput(filp);
	}
}

void nfs4_unhash_stid(struct nfs4_stid *s)
{
	s->sc_type = 0;
}
/**
 * nfs4_get_existing_delegation - Discover if this delegation already exists
 * @clp:     a pointer to the nfs4_client we're granting a delegation to
 * @fp:      a pointer to the nfs4_file we're granting a delegation on
 *
 * Return:
 *      On success: 0 if an existing delegation was not found.
 *
 *      On error: -EAGAIN if one was previously granted to this nfs4_client
 *                 for this nfs4_file.
 *
 */
static int
nfs4_get_existing_delegation(struct nfs4_client *clp, struct nfs4_file *fp)
{
	struct nfs4_delegation *searchdp = NULL;
	struct nfs4_client *searchclp = NULL;

	lockdep_assert_held(&state_lock);
	lockdep_assert_held(&fp->fi_lock);

	list_for_each_entry(searchdp, &fp->fi_delegations, dl_perfile) {
		searchclp = searchdp->dl_stid.sc_client;
		if (clp == searchclp) {
			return -EAGAIN;
		}
	}
	return 0;
}

/**
 * hash_delegation_locked - Add a delegation to the appropriate lists
 * @dp:     a pointer to the nfs4_delegation we are adding.
 * @fp:     a pointer to the nfs4_file we're granting a delegation on
 *
 * Return:
 *      On success: 0 if the delegation was successfully hashed.
 *
 *      On error: -EAGAIN if one was previously granted to this
 *                 nfs4_client for this nfs4_file. Delegation is not hashed.
 *
 */
static int
hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
{
	int status;
	struct nfs4_client *clp = dp->dl_stid.sc_client;

	lockdep_assert_held(&state_lock);
	lockdep_assert_held(&fp->fi_lock);

	status = nfs4_get_existing_delegation(clp, fp);
	if (status)
		return status;
	++fp->fi_delegees;
	atomic_inc(&dp->dl_stid.sc_count);
	dp->dl_stid.sc_type = NFS4_DELEG_STID;
	list_add(&dp->dl_perfile, &fp->fi_delegations);
	list_add(&dp->dl_perclnt, &clp->cl_delegations);
	return 0;
}
static bool
unhash_delegation_locked(struct nfs4_delegation *dp)
{
	struct nfs4_file *fp = dp->dl_stid.sc_file;

	lockdep_assert_held(&state_lock);

	if (list_empty(&dp->dl_perfile))
		return false;

	dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID;
	/* Ensure that deleg break won't try to requeue it */
	++dp->dl_time;
	spin_lock(&fp->fi_lock);
	list_del_init(&dp->dl_perclnt);
	list_del_init(&dp->dl_recall_lru);
	list_del_init(&dp->dl_perfile);
	spin_unlock(&fp->fi_lock);
	return true;
}

static void destroy_delegation(struct nfs4_delegation *dp)
{
	bool unhashed;

	spin_lock(&state_lock);
	unhashed = unhash_delegation_locked(dp);
	spin_unlock(&state_lock);
	if (unhashed) {
		put_clnt_odstate(dp->dl_clnt_odstate);
		nfs4_put_deleg_lease(dp->dl_stid.sc_file);
		nfs4_put_stid(&dp->dl_stid);
	}
}

static void revoke_delegation(struct nfs4_delegation *dp)
{
	struct nfs4_client *clp = dp->dl_stid.sc_client;

	WARN_ON(!list_empty(&dp->dl_recall_lru));

	put_clnt_odstate(dp->dl_clnt_odstate);
	nfs4_put_deleg_lease(dp->dl_stid.sc_file);

	if (clp->cl_minorversion == 0)
		nfs4_put_stid(&dp->dl_stid);
	else {
		dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID;
		spin_lock(&clp->cl_lock);
		list_add(&dp->dl_recall_lru, &clp->cl_revoked);
		spin_unlock(&clp->cl_lock);
	}
}
/*
 * SETCLIENTID state
 */

static unsigned int clientid_hashval(u32 id)
{
	return id & CLIENT_HASH_MASK;
}

static unsigned int clientstr_hashval(const char *name)
{
	return opaque_hashval(name, 8) & CLIENT_HASH_MASK;
}

/*
 * We store the NONE, READ, WRITE, and BOTH bits separately in the
 * st_{access,deny}_bmap field of the stateid, in order to track not
 * only what share bits are currently in force, but also what
 * combinations of share bits previous opens have used.  This allows us
 * to enforce the recommendation of rfc 3530 14.2.19 that the server
 * return an error if the client attempts to downgrade to a combination
 * of share bits not explicable by closing some of its previous opens.
 *
 * XXX: This enforcement is actually incomplete, since we don't keep
 * track of access/deny bit combinations; so, e.g., we allow:
 *
 *	OPEN allow read, deny write
 *	OPEN allow both, deny none
 *	DOWNGRADE allow read, deny none
 *
 * which we should reject.
 */
static unsigned int
bmap_to_share_mode(unsigned long bmap) {
	int i;
	unsigned int access = 0;

	for (i = 1; i < 4; i++) {
		if (test_bit(i, &bmap))
			access |= i;
	}
	return access;
}

/* set share access for a given stateid */
static inline void
set_access(u32 access, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << access;

	WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
	stp->st_access_bmap |= mask;
}

/* clear share access for a given stateid */
static inline void
clear_access(u32 access, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << access;

	WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
	stp->st_access_bmap &= ~mask;
}

/* test whether a given stateid has access */
static inline bool
test_access(u32 access, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << access;

	return (bool)(stp->st_access_bmap & mask);
}

/* set share deny for a given stateid */
static inline void
set_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << deny;

	WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
	stp->st_deny_bmap |= mask;
}

/* clear share deny for a given stateid */
static inline void
clear_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << deny;

	WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
	stp->st_deny_bmap &= ~mask;
}

/* test whether a given stateid is denying specific access */
static inline bool
test_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << deny;

	return (bool)(stp->st_deny_bmap & mask);
}
static int nfs4_access_to_omode(u32 access)
{
	switch (access & NFS4_SHARE_ACCESS_BOTH) {
	case NFS4_SHARE_ACCESS_READ:
		return O_RDONLY;
	case NFS4_SHARE_ACCESS_WRITE:
		return O_WRONLY;
	case NFS4_SHARE_ACCESS_BOTH:
		return O_RDWR;
	}
	WARN_ON_ONCE(1);
	return O_RDONLY;
}

/*
 * A stateid that had a deny mode associated with it is being released
 * or downgraded. Recalculate the deny mode on the file.
 */
static void
recalculate_deny_mode(struct nfs4_file *fp)
{
	struct nfs4_ol_stateid *stp;

	spin_lock(&fp->fi_lock);
	fp->fi_share_deny = 0;
	list_for_each_entry(stp, &fp->fi_stateids, st_perfile)
		fp->fi_share_deny |= bmap_to_share_mode(stp->st_deny_bmap);
	spin_unlock(&fp->fi_lock);
}

static void
reset_union_bmap_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	int i;
	bool change = false;

	for (i = 1; i < 4; i++) {
		if ((i & deny) != i) {
			if (test_bit(i, &stp->st_deny_bmap))
				change = true;
			clear_deny(i, stp);
		}
	}

	/* Recalculate per-file deny mode if there was a change */
	if (change)
		recalculate_deny_mode(stp->st_stid.sc_file);
}

/* release all access and file references for a given stateid */
static void
release_all_access(struct nfs4_ol_stateid *stp)
{
	int i;
	struct nfs4_file *fp = stp->st_stid.sc_file;

	if (fp && stp->st_deny_bmap != 0)
		recalculate_deny_mode(fp);

	for (i = 1; i < 4; i++) {
		if (test_access(i, stp))
			nfs4_file_put_access(stp->st_stid.sc_file, i);
		clear_access(i, stp);
	}
}
static inline void nfs4_free_stateowner(struct nfs4_stateowner *sop)
{
	kfree(sop->so_owner.data);
	sop->so_ops->so_free(sop);
}

static void nfs4_put_stateowner(struct nfs4_stateowner *sop)
{
	struct nfs4_client *clp = sop->so_client;

	might_lock(&clp->cl_lock);

	if (!atomic_dec_and_lock(&sop->so_count, &clp->cl_lock))
		return;
	sop->so_ops->so_unhash(sop);
	spin_unlock(&clp->cl_lock);
	nfs4_free_stateowner(sop);
}

static bool unhash_ol_stateid(struct nfs4_ol_stateid *stp)
{
	struct nfs4_file *fp = stp->st_stid.sc_file;

	lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock);

	if (list_empty(&stp->st_perfile))
		return false;

	spin_lock(&fp->fi_lock);
	list_del_init(&stp->st_perfile);
	spin_unlock(&fp->fi_lock);
	list_del(&stp->st_perstateowner);
	return true;
}

static void nfs4_free_ol_stateid(struct nfs4_stid *stid)
{
	struct nfs4_ol_stateid *stp = openlockstateid(stid);

	put_clnt_odstate(stp->st_clnt_odstate);
	release_all_access(stp);
	if (stp->st_stateowner)
		nfs4_put_stateowner(stp->st_stateowner);
	kmem_cache_free(stateid_slab, stid);
}

static void nfs4_free_lock_stateid(struct nfs4_stid *stid)
{
	struct nfs4_ol_stateid *stp = openlockstateid(stid);
	struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
	struct file *file;

	file = find_any_file(stp->st_stid.sc_file);
	if (file)
		filp_close(file, (fl_owner_t)lo);
	nfs4_free_ol_stateid(stid);
}
/*
 * Put the persistent reference to an already unhashed generic stateid, while
 * holding the cl_lock. If it's the last reference, then put it onto the
 * reaplist for later destruction.
 */
static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,
				       struct list_head *reaplist)
{
	struct nfs4_stid *s = &stp->st_stid;
	struct nfs4_client *clp = s->sc_client;

	lockdep_assert_held(&clp->cl_lock);

	WARN_ON_ONCE(!list_empty(&stp->st_locks));

	if (!atomic_dec_and_test(&s->sc_count)) {
		wake_up_all(&close_wq);
		return;
	}

	idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
	list_add(&stp->st_locks, reaplist);
}

static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
{
	lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);

	list_del_init(&stp->st_locks);
	nfs4_unhash_stid(&stp->st_stid);
	return unhash_ol_stateid(stp);
}

static void release_lock_stateid(struct nfs4_ol_stateid *stp)
{
	struct nfs4_client *clp = stp->st_stid.sc_client;
	bool unhashed;

	spin_lock(&clp->cl_lock);
	unhashed = unhash_lock_stateid(stp);
	spin_unlock(&clp->cl_lock);
	if (unhashed)
		nfs4_put_stid(&stp->st_stid);
}

static void unhash_lockowner_locked(struct nfs4_lockowner *lo)
{
	struct nfs4_client *clp = lo->lo_owner.so_client;

	lockdep_assert_held(&clp->cl_lock);

	list_del_init(&lo->lo_owner.so_strhash);
}

/*
 * Free a list of generic stateids that were collected earlier after being
 * fully unhashed:
 */
static void
free_ol_stateid_reaplist(struct list_head *reaplist)
{
	struct nfs4_ol_stateid *stp;
	struct nfs4_file *fp;

	might_sleep();

	while (!list_empty(reaplist)) {
		stp = list_first_entry(reaplist, struct nfs4_ol_stateid,
				       st_locks);
		list_del(&stp->st_locks);
		fp = stp->st_stid.sc_file;
		stp->st_stid.sc_free(&stp->st_stid);
		if (fp)
			put_nfs4_file(fp);
	}
}

static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp,
				       struct list_head *reaplist)
{
	struct nfs4_ol_stateid *stp;

	lockdep_assert_held(&open_stp->st_stid.sc_client->cl_lock);

	while (!list_empty(&open_stp->st_locks)) {
		stp = list_entry(open_stp->st_locks.next,
				struct nfs4_ol_stateid, st_locks);
		WARN_ON(!unhash_lock_stateid(stp));
		put_ol_stateid_locked(stp, reaplist);
	}
}

static bool unhash_open_stateid(struct nfs4_ol_stateid *stp,
				struct list_head *reaplist)
{
	bool unhashed;

	lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);

	unhashed = unhash_ol_stateid(stp);
	release_open_stateid_locks(stp, reaplist);
	return unhashed;
}

static void release_open_stateid(struct nfs4_ol_stateid *stp)
{
	LIST_HEAD(reaplist);

	spin_lock(&stp->st_stid.sc_client->cl_lock);
	if (unhash_open_stateid(stp, &reaplist))
		put_ol_stateid_locked(stp, &reaplist);
	spin_unlock(&stp->st_stid.sc_client->cl_lock);
	free_ol_stateid_reaplist(&reaplist);
}

static void unhash_openowner_locked(struct nfs4_openowner *oo)
{
	struct nfs4_client *clp = oo->oo_owner.so_client;

	lockdep_assert_held(&clp->cl_lock);

	list_del_init(&oo->oo_owner.so_strhash);
	list_del_init(&oo->oo_perclient);
}

static void release_last_closed_stateid(struct nfs4_openowner *oo)
{
	struct nfsd_net *nn = net_generic(oo->oo_owner.so_client->net,
					  nfsd_net_id);
	struct nfs4_ol_stateid *s;

	spin_lock(&nn->client_lock);
	s = oo->oo_last_closed_stid;
	if (s) {
		list_del_init(&oo->oo_close_lru);
		oo->oo_last_closed_stid = NULL;
	}
	spin_unlock(&nn->client_lock);
	if (s)
		nfs4_put_stid(&s->st_stid);
}

static void release_openowner(struct nfs4_openowner *oo)
{
	struct nfs4_ol_stateid *stp;
	struct nfs4_client *clp = oo->oo_owner.so_client;
	struct list_head reaplist;

	INIT_LIST_HEAD(&reaplist);

	spin_lock(&clp->cl_lock);
	unhash_openowner_locked(oo);
	while (!list_empty(&oo->oo_owner.so_stateids)) {
		stp = list_first_entry(&oo->oo_owner.so_stateids,
				struct nfs4_ol_stateid, st_perstateowner);
		if (unhash_open_stateid(stp, &reaplist))
			put_ol_stateid_locked(stp, &reaplist);
	}
	spin_unlock(&clp->cl_lock);
	free_ol_stateid_reaplist(&reaplist);
	release_last_closed_stateid(oo);
	nfs4_put_stateowner(&oo->oo_owner);
}
static inline int
hash_sessionid(struct nfs4_sessionid *sessionid)
{
	struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid;

	return sid->sequence % SESSION_HASH_SIZE;
}

#ifdef CONFIG_SUNRPC_DEBUG
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
	u32 *ptr = (u32 *)(&sessionid->data[0]);
	dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]);
}
#else
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
}
#endif

/*
 * Bump the seqid on cstate->replay_owner, and clear replay_owner if it
 * won't be used for replay.
 */
void nfsd4_bump_seqid(struct nfsd4_compound_state *cstate, __be32 nfserr)
{
	struct nfs4_stateowner *so = cstate->replay_owner;

	if (nfserr == nfserr_replay_me)
		return;

	if (!seqid_mutating_err(ntohl(nfserr))) {
		nfsd4_cstate_clear_replay(cstate);
		return;
	}
	if (!so)
		return;
	if (so->so_is_open_owner)
		release_last_closed_stateid(openowner(so));
	so->so_seqid++;
}
static void
gen_sessionid(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd4_sessionid *sid;

	sid = (struct nfsd4_sessionid *)ses->se_sessionid.data;
	sid->clientid = clp->cl_clientid;
	sid->sequence = current_sessionid++;
	sid->reserved = 0;
}

/*
 * The protocol defines ca_maxresponsesize_cached to include the size of
 * the rpc header, but all we need to cache is the data starting after
 * the end of the initial SEQUENCE operation--the rest we regenerate
 * each time.  Therefore we can advertise a ca_maxresponsesize_cached
 * value that is the number of bytes in our cache plus a few additional
 * bytes.  In order to stay on the safe side, and not promise more than
 * we can cache, those additional bytes must be the minimum possible: 24
 * bytes of rpc header (xid through accept state, with AUTH_NULL
 * verifier), 12 for the compound header (with zero-length tag), and 44
 * for the SEQUENCE op response:
 */
#define NFSD_MIN_HDR_SEQ_SZ  (24 + 12 + 44)

static void
free_session_slots(struct nfsd4_session *ses)
{
	int i;

	for (i = 0; i < ses->se_fchannel.maxreqs; i++)
		kfree(ses->se_slots[i]);
}

/*
 * We don't actually need to cache the rpc and session headers, so we
 * can allocate a little less for each slot:
 */
static inline u32 slot_bytes(struct nfsd4_channel_attrs *ca)
{
	u32 size;

	if (ca->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ)
		size = 0;
	else
		size = ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
	return size + sizeof(struct nfsd4_slot);
}
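
/*
 * Editor's note -- example arithmetic for slot_bytes(), with hypothetical
 * channel attrs (not part of the original file): NFSD_MIN_HDR_SEQ_SZ is
 * 24 + 12 + 44 = 80 bytes, so for ca->maxresp_cached == 2048 each slot
 * is allocated (2048 - 80) = 1968 bytes of reply cache plus
 * sizeof(struct nfsd4_slot) for the slot header itself.
 */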
/*
 * XXX: If we run out of reserved DRC memory we could (up to a point)
 * re-negotiate active sessions and reduce their slot usage to make
 * room for new connections. For now we just fail the create session.
 */
static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca)
{
	u32 slotsize = slot_bytes(ca);
	u32 num = ca->maxreqs;
	unsigned long avail, total_avail;

	spin_lock(&nfsd_drc_lock);
	total_avail = nfsd_drc_max_mem - nfsd_drc_mem_used;
	avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION, total_avail);
	/*
	 * Never use more than a third of the remaining memory,
	 * unless it's the only way to give this client a slot:
	 */
	avail = clamp_t(unsigned long, avail, slotsize, total_avail/3);
	num = min_t(int, num, avail / slotsize);
	nfsd_drc_mem_used += num * slotsize;
	spin_unlock(&nfsd_drc_lock);

	return num;
}
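
/*
 * Editor's note -- example of the clamp above, with hypothetical numbers
 * (not part of the original file): clamp_t(v, lo, hi) is min(max(v, lo), hi),
 * so avail is raised toward one slot's worth (slotsize) and capped at a
 * third of the remaining DRC memory.  With total_avail = 3 MB and
 * slotsize = 2 KB, avail becomes 1 MB and at most 512 slots are granted.
 */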
static void nfsd4_put_drc_mem(struct nfsd4_channel_attrs *ca)
{
	int slotsize = slot_bytes(ca);

	spin_lock(&nfsd_drc_lock);
	nfsd_drc_mem_used -= slotsize * ca->maxreqs;
	spin_unlock(&nfsd_drc_lock);
}
static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs,
					   struct nfsd4_channel_attrs *battrs)
{
	int numslots = fattrs->maxreqs;
	int slotsize = slot_bytes(fattrs);
	struct nfsd4_session *new;
	int mem, i;

	BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot *)
			+ sizeof(struct nfsd4_session) > PAGE_SIZE);
	mem = numslots * sizeof(struct nfsd4_slot *);

	new = kzalloc(sizeof(*new) + mem, GFP_KERNEL);
	if (!new)
		return NULL;
	/* allocate each struct nfsd4_slot and data cache in one piece */
	for (i = 0; i < numslots; i++) {
		new->se_slots[i] = kzalloc(slotsize, GFP_KERNEL);
		if (!new->se_slots[i])
			goto out_free;
	}

	memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs));
	memcpy(&new->se_bchannel, battrs, sizeof(struct nfsd4_channel_attrs));

	return new;
out_free:
	while (i--)
		kfree(new->se_slots[i]);
	kfree(new);
	return NULL;
}
static void free_conn(struct nfsd4_conn *c)
{
	svc_xprt_put(c->cn_xprt);
	kfree(c);
}

static void nfsd4_conn_lost(struct svc_xpt_user *u)
{
	struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user);
	struct nfs4_client *clp = c->cn_session->se_client;

	spin_lock(&clp->cl_lock);
	if (!list_empty(&c->cn_persession)) {
		list_del(&c->cn_persession);
		free_conn(c);
	}
	nfsd4_probe_callback(clp);
	spin_unlock(&clp->cl_lock);
}

static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
{
	struct nfsd4_conn *conn;

	conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL);
	if (!conn)
		return NULL;
	svc_xprt_get(rqstp->rq_xprt);
	conn->cn_xprt = rqstp->rq_xprt;
	conn->cn_flags = flags;
	INIT_LIST_HEAD(&conn->cn_xpt_user.list);
	return conn;
}

static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	conn->cn_session = ses;
	list_add(&conn->cn_persession, &ses->se_conns);
}

static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;

	spin_lock(&clp->cl_lock);
	__nfsd4_hash_conn(conn, ses);
	spin_unlock(&clp->cl_lock);
}

static int nfsd4_register_conn(struct nfsd4_conn *conn)
{
	conn->cn_xpt_user.callback = nfsd4_conn_lost;
	return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
}

static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	int ret;

	nfsd4_hash_conn(conn, ses);
	ret = nfsd4_register_conn(conn);
	if (ret)
		/* oops; xprt is already down: */
		nfsd4_conn_lost(&conn->cn_xpt_user);
	/* We may have gained or lost a callback channel: */
	nfsd4_probe_callback_sync(ses->se_client);
}

static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses)
{
	u32 dir = NFS4_CDFC4_FORE;

	if (cses->flags & SESSION4_BACK_CHAN)
		dir |= NFS4_CDFC4_BACK;
	return alloc_conn(rqstp, dir);
}

/* must be called under client_lock */
static void nfsd4_del_conns(struct nfsd4_session *s)
{
	struct nfs4_client *clp = s->se_client;
	struct nfsd4_conn *c;

	spin_lock(&clp->cl_lock);
	while (!list_empty(&s->se_conns)) {
		c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession);
		list_del_init(&c->cn_persession);
		spin_unlock(&clp->cl_lock);

		unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user);
		free_conn(c);

		spin_lock(&clp->cl_lock);
	}
	spin_unlock(&clp->cl_lock);
}
static void __free_session(struct nfsd4_session *ses)
{
	free_session_slots(ses);
	kfree(ses);
}

static void free_session(struct nfsd4_session *ses)
{
	nfsd4_del_conns(ses);
	nfsd4_put_drc_mem(&ses->se_fchannel);
	__free_session(ses);
}

static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses)
{
	int idx;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	new->se_client = clp;
	gen_sessionid(new);

	INIT_LIST_HEAD(&new->se_conns);

	new->se_cb_seq_nr = 1;
	new->se_flags = cses->flags;
	new->se_cb_prog = cses->callback_prog;
	new->se_cb_sec = cses->cb_sec;
	atomic_set(&new->se_ref, 0);
	idx = hash_sessionid(&new->se_sessionid);
	list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]);
	spin_lock(&clp->cl_lock);
	list_add(&new->se_perclnt, &clp->cl_sessions);
	spin_unlock(&clp->cl_lock);

	{
		struct sockaddr *sa = svc_addr(rqstp);
		/*
		 * This is a little silly; with sessions there's no real
		 * use for the callback address.  Use the peer address
		 * as a reasonable default for now, but consider fixing
		 * the rpc client not to require an address in the
		 * future:
		 */
		rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
		clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
	}
}
/* caller must hold client_lock */
static struct nfsd4_session *
__find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net)
{
	struct nfsd4_session *elem;
	int idx;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	dump_sessionid(__func__, sessionid);
	idx = hash_sessionid(sessionid);
	/* Search in the appropriate list */
	list_for_each_entry(elem, &nn->sessionid_hashtbl[idx], se_hash) {
		if (!memcmp(elem->se_sessionid.data, sessionid->data,
			    NFS4_MAX_SESSIONID_LEN)) {
			return elem;
		}
	}

	dprintk("%s: session not found\n", __func__);
	return NULL;
}

static struct nfsd4_session *
find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net,
		__be32 *ret)
{
	struct nfsd4_session *session;
	__be32 status = nfserr_badsession;

	session = __find_in_sessionid_hashtbl(sessionid, net);
	if (!session)
		goto out;
	status = nfsd4_get_session_locked(session);
	if (status)
		session = NULL;
out:
	*ret = status;
	return session;
}

/* caller must hold client_lock */
static void
unhash_session(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	list_del(&ses->se_hash);
	spin_lock(&ses->se_client->cl_lock);
	list_del(&ses->se_perclnt);
	spin_unlock(&ses->se_client->cl_lock);
}
/* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
static bool
STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn)
{
	/*
	 * We're assuming the clid was not given out from a boot
	 * precisely 2^32 (about 136 years) before this one.  That seems
	 * a safe assumption:
	 */
	if (clid->cl_boot == (u32)nn->boot_time)
		return false;
	dprintk("NFSD stale clientid (%08x/%08x) boot_time %08lx\n",
		clid->cl_boot, clid->cl_id, nn->boot_time);
	return true;
}
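
/*
 * Editor's note -- checking the comment's arithmetic: cl_boot is a boot
 * time truncated to 32 bits of seconds, and 2^32 seconds is roughly
 * 4294967296 / (365.25 * 86400) = 136 years, so two boots colliding on
 * the same truncated timestamp is indeed safe to rule out.
 */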
/*
 * XXX Should we use a slab cache ?
 * This type of memory management is somewhat inefficient, but we use it
 * anyway since SETCLIENTID is not a common operation.
 */
static struct nfs4_client *alloc_client(struct xdr_netobj name)
{
	struct nfs4_client *clp;
	int i;

	clp = kzalloc(sizeof(struct nfs4_client), GFP_KERNEL);
	if (clp == NULL)
		return NULL;
	clp->cl_name.data = kmemdup(name.data, name.len, GFP_KERNEL);
	if (clp->cl_name.data == NULL)
		goto err_no_name;
	clp->cl_ownerstr_hashtbl = kmalloc(sizeof(struct list_head) *
			OWNER_HASH_SIZE, GFP_KERNEL);
	if (!clp->cl_ownerstr_hashtbl)
		goto err_no_hashtbl;
	for (i = 0; i < OWNER_HASH_SIZE; i++)
		INIT_LIST_HEAD(&clp->cl_ownerstr_hashtbl[i]);
	clp->cl_name.len = name.len;
	INIT_LIST_HEAD(&clp->cl_sessions);
	idr_init(&clp->cl_stateids);
	atomic_set(&clp->cl_refcount, 0);
	clp->cl_cb_state = NFSD4_CB_UNKNOWN;
	INIT_LIST_HEAD(&clp->cl_idhash);
	INIT_LIST_HEAD(&clp->cl_openowners);
	INIT_LIST_HEAD(&clp->cl_delegations);
	INIT_LIST_HEAD(&clp->cl_lru);
	INIT_LIST_HEAD(&clp->cl_revoked);
#ifdef CONFIG_NFSD_PNFS
	INIT_LIST_HEAD(&clp->cl_lo_states);
#endif
	spin_lock_init(&clp->cl_lock);
	rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
	return clp;
err_no_hashtbl:
	kfree(clp->cl_name.data);
err_no_name:
	kfree(clp);
	return NULL;
}

static void
free_client(struct nfs4_client *clp)
{
	while (!list_empty(&clp->cl_sessions)) {
		struct nfsd4_session *ses;
		ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
				se_perclnt);
		list_del(&ses->se_perclnt);
		WARN_ON_ONCE(atomic_read(&ses->se_ref));
		free_session(ses);
	}
	rpc_destroy_wait_queue(&clp->cl_cb_waitq);
	free_svc_cred(&clp->cl_cred);
	kfree(clp->cl_ownerstr_hashtbl);
	kfree(clp->cl_name.data);
	idr_destroy(&clp->cl_stateids);
	kfree(clp);
}
/* must be called under the client_lock */
static void
unhash_client_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
	struct nfsd4_session *ses;

	lockdep_assert_held(&nn->client_lock);

	/* Mark the client as expired! */
	clp->cl_time = 0;
	/* Make it invisible */
	if (!list_empty(&clp->cl_idhash)) {
		list_del_init(&clp->cl_idhash);
		if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
			rb_erase(&clp->cl_namenode, &nn->conf_name_tree);
		else
			rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
	}
	list_del_init(&clp->cl_lru);
	spin_lock(&clp->cl_lock);
	list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
		list_del_init(&ses->se_hash);
	spin_unlock(&clp->cl_lock);
}

static void
unhash_client(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	spin_lock(&nn->client_lock);
	unhash_client_locked(clp);
	spin_unlock(&nn->client_lock);
}

static __be32 mark_client_expired_locked(struct nfs4_client *clp)
{
	if (atomic_read(&clp->cl_refcount))
		return nfserr_jukebox;
	unhash_client_locked(clp);
	return nfs_ok;
}

static void
__destroy_client(struct nfs4_client *clp)
{
	struct nfs4_openowner *oo;
	struct nfs4_delegation *dp;
	struct list_head reaplist;

	INIT_LIST_HEAD(&reaplist);
	spin_lock(&state_lock);
	while (!list_empty(&clp->cl_delegations)) {
		dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
		WARN_ON(!unhash_delegation_locked(dp));
		list_add(&dp->dl_recall_lru, &reaplist);
	}
	spin_unlock(&state_lock);
	while (!list_empty(&reaplist)) {
		dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
		list_del_init(&dp->dl_recall_lru);
		put_clnt_odstate(dp->dl_clnt_odstate);
		nfs4_put_deleg_lease(dp->dl_stid.sc_file);
		nfs4_put_stid(&dp->dl_stid);
	}
	while (!list_empty(&clp->cl_revoked)) {
		dp = list_entry(clp->cl_revoked.next, struct nfs4_delegation, dl_recall_lru);
		list_del_init(&dp->dl_recall_lru);
		nfs4_put_stid(&dp->dl_stid);
	}
	while (!list_empty(&clp->cl_openowners)) {
		oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
		nfs4_get_stateowner(&oo->oo_owner);
		release_openowner(oo);
	}
	nfsd4_return_all_client_layouts(clp);
	nfsd4_shutdown_callback(clp);
	if (clp->cl_cb_conn.cb_xprt)
		svc_xprt_put(clp->cl_cb_conn.cb_xprt);
	free_client(clp);
}

static void
destroy_client(struct nfs4_client *clp)
{
	unhash_client(clp);
	__destroy_client(clp);
}

static void expire_client(struct nfs4_client *clp)
{
	unhash_client(clp);
	nfsd4_client_record_remove(clp);
	__destroy_client(clp);
}
static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
{
	memcpy(target->cl_verifier.data, source->data,
			sizeof(target->cl_verifier.data));
}

static void copy_clid(struct nfs4_client *target, struct nfs4_client *source)
{
	target->cl_clientid.cl_boot = source->cl_clientid.cl_boot;
	target->cl_clientid.cl_id = source->cl_clientid.cl_id;
}

static int copy_cred(struct svc_cred *target, struct svc_cred *source)
{
	if (source->cr_principal) {
		target->cr_principal =
				kstrdup(source->cr_principal, GFP_KERNEL);
		if (target->cr_principal == NULL)
			return -ENOMEM;
	} else
		target->cr_principal = NULL;
	target->cr_flavor = source->cr_flavor;
	target->cr_uid = source->cr_uid;
	target->cr_gid = source->cr_gid;
	target->cr_group_info = source->cr_group_info;
	get_group_info(target->cr_group_info);
	target->cr_gss_mech = source->cr_gss_mech;
	if (source->cr_gss_mech)
		gss_mech_get(source->cr_gss_mech);
	return 0;
}

static int
compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2)
{
	if (o1->len < o2->len)
		return -1;
	if (o1->len > o2->len)
		return 1;
	return memcmp(o1->data, o2->data, o1->len);
}

static int same_name(const char *n1, const char *n2)
{
	return 0 == memcmp(n1, n2, HEXDIR_LEN);
}

static int
same_verf(nfs4_verifier *v1, nfs4_verifier *v2)
{
	return 0 == memcmp(v1->data, v2->data, sizeof(v1->data));
}

static int
same_clid(clientid_t *cl1, clientid_t *cl2)
{
	return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id);
}

static bool groups_equal(struct group_info *g1, struct group_info *g2)
{
	int i;

	if (g1->ngroups != g2->ngroups)
		return false;
	for (i = 0; i < g1->ngroups; i++)
		if (!gid_eq(GROUP_AT(g1, i), GROUP_AT(g2, i)))
			return false;
	return true;
}

/*
 * RFC 3530 language requires clid_inuse be returned when the
 * "principal" associated with a request differs from that previously
 * used.  We use uid, gid's, and gss principal string as our best
 * approximation.  We also don't want to allow non-gss use of a client
 * established using gss: in theory cr_principal should catch that
 * change, but in practice cr_principal can be null even in the gss case
 * since gssd doesn't always pass down a principal string.
 */
static bool is_gss_cred(struct svc_cred *cr)
{
	/* Is cr_flavor one of the gss "pseudoflavors"?: */
	return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR);
}

static bool
same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
{
	if ((is_gss_cred(cr1) != is_gss_cred(cr2))
		|| (!uid_eq(cr1->cr_uid, cr2->cr_uid))
		|| (!gid_eq(cr1->cr_gid, cr2->cr_gid))
		|| !groups_equal(cr1->cr_group_info, cr2->cr_group_info))
		return false;
	if (cr1->cr_principal == cr2->cr_principal)
		return true;
	if (!cr1->cr_principal || !cr2->cr_principal)
		return false;
	return 0 == strcmp(cr1->cr_principal, cr2->cr_principal);
}

static bool svc_rqst_integrity_protected(struct svc_rqst *rqstp)
{
	struct svc_cred *cr = &rqstp->rq_cred;
	u32 service;

	if (!cr->cr_gss_mech)
		return false;
	service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor);
	return service == RPC_GSS_SVC_INTEGRITY ||
	       service == RPC_GSS_SVC_PRIVACY;
}

static bool mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp)
{
	struct svc_cred *cr = &rqstp->rq_cred;

	if (!cl->cl_mach_cred)
		return true;
	if (cl->cl_cred.cr_gss_mech != cr->cr_gss_mech)
		return false;
	if (!svc_rqst_integrity_protected(rqstp))
		return false;
	if (!cr->cr_principal)
		return false;
	return 0 == strcmp(cl->cl_cred.cr_principal, cr->cr_principal);
}
static void gen_confirm(struct nfs4_client *clp, struct nfsd_net *nn)
{
	__be32 verf[2];

	/*
	 * This is opaque to client, so no need to byte-swap. Use
	 * __force to keep sparse happy
	 */
	verf[0] = (__force __be32)get_seconds();
	verf[1] = (__force __be32)nn->clverifier_counter++;
	memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data));
}

static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn)
{
	clp->cl_clientid.cl_boot = nn->boot_time;
	clp->cl_clientid.cl_id = nn->clientid_counter++;
	gen_confirm(clp, nn);
}
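
/*
 * Editor's note -- an illustrative sketch with hypothetical values (not
 * part of the original file): a clientid is the pair (boot time, counter):
 *
 *	clp->cl_clientid.cl_boot = nn->boot_time;	// e.g. 0x5b8e4d20
 *	clp->cl_clientid.cl_id   = 0, 1, 2, ...		// per server boot
 *
 * Embedding the boot time is what lets STALE_CLIENTID() above recognize
 * a clientid issued before a server reboot with a single cl_boot compare.
 */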
static struct nfs4_stid *
find_stateid_locked(struct nfs4_client *cl, stateid_t *t)
{
	struct nfs4_stid *ret;

	ret = idr_find(&cl->cl_stateids, t->si_opaque.so_id);
	if (!ret || !ret->sc_type)
		return NULL;
	return ret;
}

static struct nfs4_stid *
find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask)
{
	struct nfs4_stid *s;

	spin_lock(&cl->cl_lock);
	s = find_stateid_locked(cl, t);
	if (s != NULL) {
		if (typemask & s->sc_type)
			atomic_inc(&s->sc_count);
		else
			s = NULL;
	}
	spin_unlock(&cl->cl_lock);
	return s;
}
static struct nfs4_client *create_client(struct xdr_netobj name,
		struct svc_rqst *rqstp, nfs4_verifier *verf)
{
	struct nfs4_client *clp;
	struct sockaddr *sa = svc_addr(rqstp);
	int ret;
	struct net *net = SVC_NET(rqstp);

	clp = alloc_client(name);
	if (clp == NULL)
		return NULL;

	ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
	if (ret) {
		free_client(clp);
		return NULL;
	}
	nfsd4_init_cb(&clp->cl_cb_null, clp, NULL, NFSPROC4_CLNT_CB_NULL);
	clp->cl_time = get_seconds();
	clear_bit(0, &clp->cl_cb_slot_busy);
	copy_verf(clp, verf);
	rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa);
	clp->cl_cb_session = NULL;
	clp->net = net;
	return clp;
}

static void
add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	struct nfs4_client *clp;

	while (*new) {
		clp = rb_entry(*new, struct nfs4_client, cl_namenode);
		parent = *new;

		if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	rb_link_node(&new_clp->cl_namenode, parent, new);
	rb_insert_color(&new_clp->cl_namenode, root);
}
static struct nfs4_client *
find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root)
{
	int cmp;
	struct rb_node *node = root->rb_node;
	struct nfs4_client *clp;

	while (node) {
		clp = rb_entry(node, struct nfs4_client, cl_namenode);
		cmp = compare_blob(&clp->cl_name, name);
		if (cmp > 0)
			node = node->rb_left;
		else if (cmp < 0)
			node = node->rb_right;
		else
			return clp;
	}
	return NULL;
}

static void
add_to_unconfirmed(struct nfs4_client *clp)
{
	unsigned int idhashval;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
	add_clp_to_name_tree(clp, &nn->unconf_name_tree);
	idhashval = clientid_hashval(clp->cl_clientid.cl_id);
	list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]);
	renew_client_locked(clp);
}

static void
move_to_confirmed(struct nfs4_client *clp)
{
	unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id);
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	dprintk("NFSD: move_to_confirm nfs4_client %p\n", clp);
	list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]);
	rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
	add_clp_to_name_tree(clp, &nn->conf_name_tree);
	set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
	renew_client_locked(clp);
}

static struct nfs4_client *
find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions)
{
	struct nfs4_client *clp;
	unsigned int idhashval = clientid_hashval(clid->cl_id);

	list_for_each_entry(clp, &tbl[idhashval], cl_idhash) {
		if (same_clid(&clp->cl_clientid, clid)) {
			if ((bool)clp->cl_minorversion != sessions)
				return NULL;
			renew_client_locked(clp);
			return clp;
		}
	}
	return NULL;
}

static struct nfs4_client *
find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
{
	struct list_head *tbl = nn->conf_id_hashtbl;

	lockdep_assert_held(&nn->client_lock);
	return find_client_in_id_table(tbl, clid, sessions);
}

static struct nfs4_client *
find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
{
	struct list_head *tbl = nn->unconf_id_hashtbl;

	lockdep_assert_held(&nn->client_lock);
	return find_client_in_id_table(tbl, clid, sessions);
}

static bool clp_used_exchangeid(struct nfs4_client *clp)
{
	return clp->cl_exchange_flags != 0;
}

static struct nfs4_client *
find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
{
	lockdep_assert_held(&nn->client_lock);
	return find_clp_in_name_tree(name, &nn->conf_name_tree);
}

static struct nfs4_client *
find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
{
	lockdep_assert_held(&nn->client_lock);
	return find_clp_in_name_tree(name, &nn->unconf_name_tree);
}
static void
gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp)
{
	struct nfs4_cb_conn *conn = &clp->cl_cb_conn;
	struct sockaddr	*sa = svc_addr(rqstp);
	u32 scopeid = rpc_get_scope_id(sa);
	unsigned short expected_family;

	/* Currently, we only support tcp and tcp6 for the callback channel */
	if (se->se_callback_netid_len == 3 &&
	    !memcmp(se->se_callback_netid_val, "tcp", 3))
		expected_family = AF_INET;
	else if (se->se_callback_netid_len == 4 &&
		 !memcmp(se->se_callback_netid_val, "tcp6", 4))
		expected_family = AF_INET6;
	else
		goto out_err;

	conn->cb_addrlen = rpc_uaddr2sockaddr(clp->net, se->se_callback_addr_val,
					    se->se_callback_addr_len,
					    (struct sockaddr *)&conn->cb_addr,
					    sizeof(conn->cb_addr));

	if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family)
		goto out_err;

	if (conn->cb_addr.ss_family == AF_INET6)
		((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid;

	conn->cb_prog = se->se_callback_prog;
	conn->cb_ident = se->se_callback_ident;
	memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen);
	return;
out_err:
	conn->cb_addr.ss_family = AF_UNSPEC;
	conn->cb_addrlen = 0;
	dprintk(KERN_INFO "NFSD: this client (clientid %08x/%08x) "
		"will not receive delegations\n",
		clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id);

	return;
}
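
/*
 * Editor's note -- an example of the universal-address handling above,
 * with hypothetical SETCLIENTID callback values (not from the original
 * file): netid "tcp" with r_addr "192.0.2.1.8.1" parses to an AF_INET
 * sockaddr for 192.0.2.1, port 8 * 256 + 1 = 2049.  A netid/address
 * family mismatch (e.g. "tcp" with an IPv6 uaddr) takes the out_err
 * path and the client simply receives no delegations.
 */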
/*
 * Cache a reply. nfsd4_check_resp_size() has bounded the cache size.
 */
static void
nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
{
	struct xdr_buf *buf = resp->xdr.buf;
	struct nfsd4_slot *slot = resp->cstate.slot;
	unsigned int base;

	dprintk("--> %s slot %p\n", __func__, slot);

	slot->sl_opcnt = resp->opcnt;
	slot->sl_status = resp->cstate.status;

	slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
	if (nfsd4_not_cached(resp)) {
		slot->sl_datalen = 0;
		return;
	}
	base = resp->cstate.data_offset;
	slot->sl_datalen = buf->len - base;
	if (read_bytes_from_xdr_buf(buf, base, slot->sl_data, slot->sl_datalen))
		WARN(1, "%s: sessions DRC could not cache compound\n",
		     __func__);
	return;
}

/*
 * Encode the replay sequence operation from the slot values.
 * If cachethis is FALSE encode the uncached rep error on the next
 * operation which sets resp->p and increments resp->opcnt for
 * nfs4svc_encode_compoundres.
 */
static __be32
nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
			  struct nfsd4_compoundres *resp)
{
	struct nfsd4_op *op;
	struct nfsd4_slot *slot = resp->cstate.slot;

	/* Encode the replayed sequence operation */
	op = &args->ops[resp->opcnt - 1];
	nfsd4_encode_operation(resp, op);

	/* Return nfserr_retry_uncached_rep in next operation. */
	if (args->opcnt > 1 && !(slot->sl_flags & NFSD4_SLOT_CACHETHIS)) {
		op = &args->ops[resp->opcnt++];
		op->status = nfserr_retry_uncached_rep;
		nfsd4_encode_operation(resp, op);
	}
	return op->status;
}

/*
 * The sequence operation is not cached because we can use the slot and
 * session values.
 */
static __be32
nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
			 struct nfsd4_sequence *seq)
{
	struct nfsd4_slot *slot = resp->cstate.slot;
	struct xdr_stream *xdr = &resp->xdr;
	__be32 *p;
	__be32 status;

	dprintk("--> %s slot %p\n", __func__, slot);

	status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp);
	if (status)
		return status;

	p = xdr_reserve_space(xdr, slot->sl_datalen);
	if (!p) {
		WARN_ON_ONCE(1);
		return nfserr_serverfault;
	}
	xdr_encode_opaque_fixed(p, slot->sl_data, slot->sl_datalen);
	xdr_commit_encode(xdr);

	resp->opcnt = slot->sl_opcnt;
	return slot->sl_status;
}
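/*
 * Taken together, nfsd4_store_cache_entry() and
 * nfsd4_replay_cache_entry() implement the per-slot reply cache: the
 * raw XDR bytes of the reply (minus the SEQUENCE op, which is always
 * re-encoded from live slot values) are copied into sl_data on the way
 * out and pasted back verbatim when a retransmission hits the cache.
 */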
/*
 * Set the exchange_id flags returned by the server.
 */
static void
nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid)
{
#ifdef CONFIG_NFSD_PNFS
	new->cl_exchange_flags |= EXCHGID4_FLAG_USE_PNFS_MDS;
#else
	new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS;
#endif

	/* Referrals are supported, Migration is not. */
	new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER;

	/* set the wire flags to return to client. */
	clid->flags = new->cl_exchange_flags;
}

static bool client_has_openowners(struct nfs4_client *clp)
{
	struct nfs4_openowner *oo;

	list_for_each_entry(oo, &clp->cl_openowners, oo_perclient) {
		if (!list_empty(&oo->oo_owner.so_stateids))
			return true;
	}
	return false;
}

static bool client_has_state(struct nfs4_client *clp)
{
	return client_has_openowners(clp)
#ifdef CONFIG_NFSD_PNFS
		|| !list_empty(&clp->cl_lo_states)
#endif
		|| !list_empty(&clp->cl_delegations)
		|| !list_empty(&clp->cl_sessions);
}
__be32
nfsd4_exchange_id(struct svc_rqst *rqstp,
		  struct nfsd4_compound_state *cstate,
		  struct nfsd4_exchange_id *exid)
{
	struct nfs4_client *conf, *new;
	struct nfs4_client *unconf = NULL;
	__be32 status;
	char addr_str[INET6_ADDRSTRLEN];
	nfs4_verifier verf = exid->verifier;
	struct sockaddr *sa = svc_addr(rqstp);
	bool update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	rpc_ntop(sa, addr_str, sizeof(addr_str));
	dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p "
		"ip_addr=%s flags %x, spa_how %d\n",
		__func__, rqstp, exid, exid->clname.len, exid->clname.data,
		addr_str, exid->flags, exid->spa_how);

	if (exid->flags & ~EXCHGID4_FLAG_MASK_A)
		return nfserr_inval;

	switch (exid->spa_how) {
	case SP4_MACH_CRED:
		if (!svc_rqst_integrity_protected(rqstp))
			return nfserr_inval;
	case SP4_NONE:
		break;
	default:				/* checked by xdr code */
		WARN_ON_ONCE(1);
	case SP4_SSV:
		return nfserr_encr_alg_unsupp;
	}

	new = create_client(exid->clname, rqstp, &verf);
	if (new == NULL)
		return nfserr_jukebox;

	/* Cases below refer to rfc 5661 section 18.35.4: */
	spin_lock(&nn->client_lock);
	conf = find_confirmed_client_by_name(&exid->clname, nn);
	if (conf) {
		bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred);
		bool verfs_match = same_verf(&verf, &conf->cl_verifier);

		if (update) {
			if (!clp_used_exchangeid(conf)) { /* buggy client */
				status = nfserr_inval;
				goto out;
			}
			if (!mach_creds_match(conf, rqstp)) {
				status = nfserr_wrong_cred;
				goto out;
			}
			if (!creds_match) { /* case 9 */
				status = nfserr_perm;
				goto out;
			}
			if (!verfs_match) { /* case 8 */
				status = nfserr_not_same;
				goto out;
			}
			/* case 6 */
			exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;
			goto out_copy;
		}
		if (!creds_match) { /* case 3 */
			if (client_has_state(conf)) {
				status = nfserr_clid_inuse;
				goto out;
			}
			goto out_new;
		}
		if (verfs_match) { /* case 2 */
			conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
			goto out_copy;
		}
		/* case 5, client reboot */
		conf = NULL;
		goto out_new;
	}

	if (update) { /* case 7 */
		status = nfserr_noent;
		goto out;
	}

	unconf = find_unconfirmed_client_by_name(&exid->clname, nn);
	if (unconf) /* case 4, possible retry or client restart */
		unhash_client_locked(unconf);

	/* case 1 (normal case) */
out_new:
	if (conf) {
		status = mark_client_expired_locked(conf);
		if (status)
			goto out;
	}
	new->cl_minorversion = cstate->minorversion;
	new->cl_mach_cred = (exid->spa_how == SP4_MACH_CRED);

	gen_clid(new, nn);
	add_to_unconfirmed(new);
	swap(new, conf);
out_copy:
	exid->clientid.cl_boot = conf->cl_clientid.cl_boot;
	exid->clientid.cl_id = conf->cl_clientid.cl_id;

	exid->seqid = conf->cl_cs_slot.sl_seqid + 1;
	nfsd4_set_ex_flags(conf, exid);

	dprintk("nfsd4_exchange_id seqid %d flags %x\n",
		conf->cl_cs_slot.sl_seqid, conf->cl_exchange_flags);
	status = nfs_ok;

out:
	spin_unlock(&nn->client_lock);
	if (new)
		expire_client(new);
	if (unconf)
		expire_client(unconf);
	return status;
}
static __be32
check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
{
	dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,
		slot_seqid);

	/* The slot is in use, and no response has been sent. */
	if (slot_inuse) {
		if (seqid == slot_seqid)
			return nfserr_jukebox;
		else
			return nfserr_seq_misordered;
	}
	/* Note unsigned 32-bit arithmetic handles wraparound: */
	if (likely(seqid == slot_seqid + 1))
		return nfs_ok;
	if (seqid == slot_seqid)
		return nfserr_replay_cache;
	return nfserr_seq_misordered;
}
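/*
 * A worked example of check_slot_seqid(): with slot_seqid at 5 and the
 * slot not in use, an incoming seqid of 6 is the expected new request,
 * 5 is a retransmission to be answered from the replay cache, and
 * anything else is misordered.  Because the comparison is done in
 * unsigned 32-bit arithmetic, a slot_seqid of 0xffffffff correctly
 * expects 0 next.
 */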
/*
 * Cache the create session result into the create session single DRC
 * slot cache by saving the xdr structure. sl_seqid has been set.
 * Do this for solo or embedded create session operations.
 */
static void
nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
			   struct nfsd4_clid_slot *slot, __be32 nfserr)
{
	slot->sl_status = nfserr;
	memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses));
}

static __be32
nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
			    struct nfsd4_clid_slot *slot)
{
	memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses));
	return slot->sl_status;
}

#define NFSD_MIN_REQ_HDR_SEQ_SZ	((\
			2 * 2 + /* credential,verifier: AUTH_NULL, length 0 */ \
			1 +	/* MIN tag is length with zero, only length */ \
			3 +	/* version, opcount, opcode */ \
			XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
				/* seqid, slotID, slotID, cache */ \
			4 ) * sizeof(__be32))

#define NFSD_MIN_RESP_HDR_SEQ_SZ ((\
			2 +	/* verifier: AUTH_NULL, length 0 */\
			1 +	/* status */ \
			1 +	/* MIN tag is length with zero, only length */ \
			3 +	/* opcount, opcode, opstatus*/ \
			XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
				/* seqid, slotID, slotID, slotID, status */ \
			5 ) * sizeof(__be32))
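/*
 * The two macros above count XDR words (4 bytes each) for the smallest
 * legal SEQUENCE compound: RPC credential/verifier, a zero-length tag,
 * the compound header, the session id, and the SEQUENCE arguments or
 * results themselves, then convert to bytes via sizeof(__be32).
 */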
static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
{
	u32 maxrpc = nn->nfsd_serv->sv_max_mesg;

	if (ca->maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ)
		return nfserr_toosmall;
	if (ca->maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ)
		return nfserr_toosmall;
	ca->headerpadsz = 0;
	ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc);
	ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc);
	ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND);
	ca->maxresp_cached = min_t(u32, ca->maxresp_cached,
			NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ);
	ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION);
	/*
	 * Note decreasing slot size below client's request may make it
	 * difficult for client to function correctly, whereas
	 * decreasing the number of slots will (just?) affect
	 * performance.  When short on memory we therefore prefer to
	 * decrease number of slots instead of their size.  Clients that
	 * request larger slots than they need will get poor results:
	 */
	ca->maxreqs = nfsd4_get_drc_mem(ca);
	if (!ca->maxreqs)
		return nfserr_jukebox;

	return nfs_ok;
}

#define NFSD_CB_MAX_REQ_SZ	((NFS4_enc_cb_recall_sz + \
				 RPC_MAX_HEADER_WITH_AUTH) * sizeof(__be32))
#define NFSD_CB_MAX_RESP_SZ	((NFS4_dec_cb_recall_sz + \
				 RPC_MAX_REPHEADER_WITH_AUTH) * sizeof(__be32))

static __be32 check_backchannel_attrs(struct nfsd4_channel_attrs *ca)
{
	ca->headerpadsz = 0;

	/*
	 * These RPC_MAX_HEADER macros are overkill, especially since we
	 * don't even do gss on the backchannel yet.  But this is still
	 * less than 1k.  Tighten up this estimate in the unlikely event
	 * it turns out to be a problem for some client:
	 */
	if (ca->maxreq_sz < NFSD_CB_MAX_REQ_SZ)
		return nfserr_toosmall;
	if (ca->maxresp_sz < NFSD_CB_MAX_RESP_SZ)
		return nfserr_toosmall;
	ca->maxresp_cached = 0;
	if (ca->maxops < 2)
		return nfserr_toosmall;

	return nfs_ok;
}

static __be32 nfsd4_check_cb_sec(struct nfsd4_cb_sec *cbs)
{
	switch (cbs->flavor) {
	case RPC_AUTH_NULL:
	case RPC_AUTH_UNIX:
		return nfs_ok;
	default:
		/*
		 * GSS case: the spec doesn't allow us to return this
		 * error.  But it also doesn't allow us not to support
		 * GSS.
		 * I'd rather this fail hard than return some error the
		 * client might think it can already handle:
		 */
		return nfserr_encr_alg_unsupp;
	}
}
__be32
nfsd4_create_session(struct svc_rqst *rqstp,
		     struct nfsd4_compound_state *cstate,
		     struct nfsd4_create_session *cr_ses)
{
	struct sockaddr *sa = svc_addr(rqstp);
	struct nfs4_client *conf, *unconf;
	struct nfs4_client *old = NULL;
	struct nfsd4_session *new;
	struct nfsd4_conn *conn;
	struct nfsd4_clid_slot *cs_slot = NULL;
	__be32 status = 0;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	if (cr_ses->flags & ~SESSION4_FLAG_MASK_A)
		return nfserr_inval;
	status = nfsd4_check_cb_sec(&cr_ses->cb_sec);
	if (status)
		return status;
	status = check_forechannel_attrs(&cr_ses->fore_channel, nn);
	if (status)
		return status;
	status = check_backchannel_attrs(&cr_ses->back_channel);
	if (status)
		goto out_release_drc_mem;
	status = nfserr_jukebox;
	new = alloc_session(&cr_ses->fore_channel, &cr_ses->back_channel);
	if (!new)
		goto out_release_drc_mem;
	conn = alloc_conn_from_crses(rqstp, cr_ses);
	if (!conn)
		goto out_free_session;

	spin_lock(&nn->client_lock);
	unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn);
	conf = find_confirmed_client(&cr_ses->clientid, true, nn);
	WARN_ON_ONCE(conf && unconf);

	if (conf) {
		status = nfserr_wrong_cred;
		if (!mach_creds_match(conf, rqstp))
			goto out_free_conn;
		cs_slot = &conf->cl_cs_slot;
		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
		if (status) {
			if (status == nfserr_replay_cache)
				status = nfsd4_replay_create_session(cr_ses, cs_slot);
			goto out_free_conn;
		}
	} else if (unconf) {
		if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
		    !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) {
			status = nfserr_clid_inuse;
			goto out_free_conn;
		}
		status = nfserr_wrong_cred;
		if (!mach_creds_match(unconf, rqstp))
			goto out_free_conn;
		cs_slot = &unconf->cl_cs_slot;
		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
		if (status) {
			/* an unconfirmed replay returns misordered */
			status = nfserr_seq_misordered;
			goto out_free_conn;
		}
		old = find_confirmed_client_by_name(&unconf->cl_name, nn);
		if (old) {
			status = mark_client_expired_locked(old);
			if (status) {
				old = NULL;
				goto out_free_conn;
			}
		}
		move_to_confirmed(unconf);
		conf = unconf;
	} else {
		status = nfserr_stale_clientid;
		goto out_free_conn;
	}
	status = nfs_ok;
	/*
	 * We do not support RDMA or persistent sessions
	 */
	cr_ses->flags &= ~SESSION4_PERSIST;
	cr_ses->flags &= ~SESSION4_RDMA;

	init_session(rqstp, new, conf, cr_ses);
	nfsd4_get_session_locked(new);

	memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
	       NFS4_MAX_SESSIONID_LEN);
	cs_slot->sl_seqid++;
	cr_ses->seqid = cs_slot->sl_seqid;

	/* cache solo and embedded create sessions under the client_lock */
	nfsd4_cache_create_session(cr_ses, cs_slot, status);
	spin_unlock(&nn->client_lock);
	/* init connection and backchannel */
	nfsd4_init_conn(rqstp, conn, new);
	nfsd4_put_session(new);
	if (old)
		expire_client(old);
	return status;
out_free_conn:
	spin_unlock(&nn->client_lock);
	free_conn(conn);
	if (old)
		expire_client(old);
out_free_session:
	__free_session(new);
out_release_drc_mem:
	nfsd4_put_drc_mem(&cr_ses->fore_channel);
	return status;
}
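/*
 * Note the asymmetry in nfsd4_create_session() above: a confirmed
 * client may be answered from its create_session slot cache, while an
 * unconfirmed client must present an exactly-in-order seqid (a replay
 * against an unconfirmed client is reported as misordered).  Only on
 * success is the unconfirmed client moved to the confirmed table.
 */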
static __be32 nfsd4_map_bcts_dir(u32 *dir)
{
	switch (*dir) {
	case NFS4_CDFC4_FORE:
	case NFS4_CDFC4_BACK:
		return nfs_ok;
	case NFS4_CDFC4_FORE_OR_BOTH:
	case NFS4_CDFC4_BACK_OR_BOTH:
		*dir = NFS4_CDFC4_BOTH;
		return nfs_ok;
	};
	return nfserr_inval;
}

__be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_backchannel_ctl *bc)
{
	struct nfsd4_session *session = cstate->session;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
	__be32 status;

	status = nfsd4_check_cb_sec(&bc->bc_cb_sec);
	if (status)
		return status;
	spin_lock(&nn->client_lock);
	session->se_cb_prog = bc->bc_cb_program;
	session->se_cb_sec = bc->bc_cb_sec;
	spin_unlock(&nn->client_lock);

	nfsd4_probe_callback(session->se_client);

	return nfs_ok;
}

__be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
		     struct nfsd4_compound_state *cstate,
		     struct nfsd4_bind_conn_to_session *bcts)
{
	__be32 status;
	struct nfsd4_conn *conn;
	struct nfsd4_session *session;
	struct net *net = SVC_NET(rqstp);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	if (!nfsd4_last_compound_op(rqstp))
		return nfserr_not_only_op;
	spin_lock(&nn->client_lock);
	session = find_in_sessionid_hashtbl(&bcts->sessionid, net, &status);
	spin_unlock(&nn->client_lock);
	if (!session)
		goto out_no_session;
	status = nfserr_wrong_cred;
	if (!mach_creds_match(session->se_client, rqstp))
		goto out;
	status = nfsd4_map_bcts_dir(&bcts->dir);
	if (status)
		goto out;
	conn = alloc_conn(rqstp, bcts->dir);
	status = nfserr_jukebox;
	if (!conn)
		goto out;
	nfsd4_init_conn(rqstp, conn, session);
	status = nfs_ok;
out:
	nfsd4_put_session(session);
out_no_session:
	return status;
}

static bool nfsd4_compound_in_session(struct nfsd4_session *session, struct nfs4_sessionid *sid)
{
	if (!session)
		return false;
	return !memcmp(sid, &session->se_sessionid, sizeof(*sid));
}
__be32
nfsd4_destroy_session(struct svc_rqst *r,
		      struct nfsd4_compound_state *cstate,
		      struct nfsd4_destroy_session *sessionid)
{
	struct nfsd4_session *ses;
	__be32 status;
	int ref_held_by_me = 0;
	struct net *net = SVC_NET(r);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	status = nfserr_not_only_op;
	if (nfsd4_compound_in_session(cstate->session, &sessionid->sessionid)) {
		if (!nfsd4_last_compound_op(r))
			goto out;
		ref_held_by_me++;
	}
	dump_sessionid(__func__, &sessionid->sessionid);
	spin_lock(&nn->client_lock);
	ses = find_in_sessionid_hashtbl(&sessionid->sessionid, net, &status);
	if (!ses)
		goto out_client_lock;
	status = nfserr_wrong_cred;
	if (!mach_creds_match(ses->se_client, r))
		goto out_put_session;
	status = mark_session_dead_locked(ses, 1 + ref_held_by_me);
	if (status)
		goto out_put_session;
	unhash_session(ses);
	spin_unlock(&nn->client_lock);

	nfsd4_probe_callback_sync(ses->se_client);

	spin_lock(&nn->client_lock);
	status = nfs_ok;
out_put_session:
	nfsd4_put_session_locked(ses);
out_client_lock:
	spin_unlock(&nn->client_lock);
out:
	return status;
}

static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
{
	struct nfsd4_conn *c;

	list_for_each_entry(c, &s->se_conns, cn_persession) {
		if (c->cn_xprt == xpt) {
			return c;
		}
	}
	return NULL;
}
static __be32 nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd4_conn *c;
	__be32 status = nfs_ok;
	int ret;

	spin_lock(&clp->cl_lock);
	c = __nfsd4_find_conn(new->cn_xprt, ses);
	if (c)
		goto out_free;
	status = nfserr_conn_not_bound_to_session;
	if (clp->cl_mach_cred)
		goto out_free;
	__nfsd4_hash_conn(new, ses);
	spin_unlock(&clp->cl_lock);
	ret = nfsd4_register_conn(new);
	if (ret)
		/* oops; xprt is already down: */
		nfsd4_conn_lost(&new->cn_xpt_user);
	return nfs_ok;
out_free:
	spin_unlock(&clp->cl_lock);
	free_conn(new);
	return status;
}

static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session)
{
	struct nfsd4_compoundargs *args = rqstp->rq_argp;

	return args->opcnt > session->se_fchannel.maxops;
}

static bool nfsd4_request_too_big(struct svc_rqst *rqstp,
				  struct nfsd4_session *session)
{
	struct xdr_buf *xb = &rqstp->rq_arg;

	return xb->len > session->se_fchannel.maxreq_sz;
}
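/*
 * nfsd4_sequence() below is the gatekeeper for every v4.1+ compound:
 * it validates the session and slot, enforces the fore-channel limits
 * checked by the two helpers above, and either dispatches a cached
 * replay or marks the slot in use for a new request.
 */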
__be32
nfsd4_sequence(struct svc_rqst *rqstp,
	       struct nfsd4_compound_state *cstate,
	       struct nfsd4_sequence *seq)
{
	struct nfsd4_compoundres *resp = rqstp->rq_resp;
	struct xdr_stream *xdr = &resp->xdr;
	struct nfsd4_session *session;
	struct nfs4_client *clp;
	struct nfsd4_slot *slot;
	struct nfsd4_conn *conn;
	__be32 status;
	int buflen;
	struct net *net = SVC_NET(rqstp);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	if (resp->opcnt != 1)
		return nfserr_sequence_pos;

	/*
	 * Will be either used or freed by nfsd4_sequence_check_conn
	 * below.
	 */
	conn = alloc_conn(rqstp, NFS4_CDFC4_FORE);
	if (!conn)
		return nfserr_jukebox;

	spin_lock(&nn->client_lock);
	session = find_in_sessionid_hashtbl(&seq->sessionid, net, &status);
	if (!session)
		goto out_no_session;
	clp = session->se_client;

	status = nfserr_too_many_ops;
	if (nfsd4_session_too_many_ops(rqstp, session))
		goto out_put_session;

	status = nfserr_req_too_big;
	if (nfsd4_request_too_big(rqstp, session))
		goto out_put_session;

	status = nfserr_badslot;
	if (seq->slotid >= session->se_fchannel.maxreqs)
		goto out_put_session;

	slot = session->se_slots[seq->slotid];
	dprintk("%s: slotid %d\n", __func__, seq->slotid);

	/* We do not negotiate the number of slots yet, so set the
	 * maxslots to the session maxreqs which is used to encode
	 * sr_highest_slotid and the sr_target_slot id to maxslots */
	seq->maxslots = session->se_fchannel.maxreqs;

	status = check_slot_seqid(seq->seqid, slot->sl_seqid,
					slot->sl_flags & NFSD4_SLOT_INUSE);
	if (status == nfserr_replay_cache) {
		status = nfserr_seq_misordered;
		if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED))
			goto out_put_session;
		cstate->slot = slot;
		cstate->session = session;
		cstate->clp = clp;
		/* Return the cached reply status and set cstate->status
		 * for nfsd4_proc_compound processing */
		status = nfsd4_replay_cache_entry(resp, seq);
		cstate->status = nfserr_replay_cache;
		goto out;
	}
	if (status)
		goto out_put_session;

	status = nfsd4_sequence_check_conn(conn, session);
	conn = NULL;
	if (status)
		goto out_put_session;

	buflen = (seq->cachethis) ?
			session->se_fchannel.maxresp_cached :
			session->se_fchannel.maxresp_sz;
	status = (seq->cachethis) ? nfserr_rep_too_big_to_cache :
				    nfserr_rep_too_big;
	if (xdr_restrict_buflen(xdr, buflen - rqstp->rq_auth_slack))
		goto out_put_session;
	svc_reserve(rqstp, buflen);

	status = nfs_ok;
	/* Success! bump slot seqid */
	slot->sl_seqid = seq->seqid;
	slot->sl_flags |= NFSD4_SLOT_INUSE;
	if (seq->cachethis)
		slot->sl_flags |= NFSD4_SLOT_CACHETHIS;
	else
		slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS;

	cstate->slot = slot;
	cstate->session = session;
	cstate->clp = clp;

out:
	switch (clp->cl_cb_state) {
	case NFSD4_CB_DOWN:
		seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN;
		break;
	case NFSD4_CB_FAULT:
		seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT;
		break;
	default:
		seq->status_flags = 0;
	}
	if (!list_empty(&clp->cl_revoked))
		seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED;
out_no_session:
	if (conn)
		free_conn(conn);
	spin_unlock(&nn->client_lock);
	return status;
out_put_session:
	nfsd4_put_session_locked(session);
	goto out_no_session;
}

void
nfsd4_sequence_done(struct nfsd4_compoundres *resp)
{
	struct nfsd4_compound_state *cs = &resp->cstate;

	if (nfsd4_has_session(cs)) {
		if (cs->status != nfserr_replay_cache) {
			nfsd4_store_cache_entry(resp);
			cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE;
		}
		/* Drop session reference that was taken in nfsd4_sequence() */
		nfsd4_put_session(cs->session);
	} else if (cs->clp)
		put_client_renew(cs->clp);
}
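/*
 * The NFSD4_SLOT_INUSE flag set in nfsd4_sequence() is cleared here in
 * nfsd4_sequence_done() once the reply has been cached, which is what
 * allows check_slot_seqid() to distinguish "still processing" (return
 * nfserr_jukebox) from a genuine replay of a completed request.
 */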
__be32
nfsd4_destroy_clientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_destroy_clientid *dc)
{
	struct nfs4_client *conf, *unconf;
	struct nfs4_client *clp = NULL;
	__be32 status = 0;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	spin_lock(&nn->client_lock);
	unconf = find_unconfirmed_client(&dc->clientid, true, nn);
	conf = find_confirmed_client(&dc->clientid, true, nn);
	WARN_ON_ONCE(conf && unconf);

	if (conf) {
		if (client_has_state(conf)) {
			status = nfserr_clientid_busy;
			goto out;
		}
		status = mark_client_expired_locked(conf);
		if (status)
			goto out;
		clp = conf;
	} else if (unconf)
		clp = unconf;
	else {
		status = nfserr_stale_clientid;
		goto out;
	}
	if (!mach_creds_match(clp, rqstp)) {
		clp = NULL;
		status = nfserr_wrong_cred;
		goto out;
	}
	unhash_client_locked(clp);
out:
	spin_unlock(&nn->client_lock);
	if (clp)
		expire_client(clp);
	return status;
}

__be32
nfsd4_reclaim_complete(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_reclaim_complete *rc)
{
	__be32 status = 0;

	if (rc->rca_one_fs) {
		if (!cstate->current_fh.fh_dentry)
			return nfserr_nofilehandle;
		/*
		 * We don't take advantage of the rca_one_fs case.
		 * That's OK, it's optional, we can safely ignore it.
		 */
		return nfs_ok;
	}

	status = nfserr_complete_already;
	if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE,
			     &cstate->session->se_client->cl_flags))
		goto out;

	status = nfserr_stale_clientid;
	if (is_client_expired(cstate->session->se_client))
		/*
		 * The following error isn't really legal.
		 * But we only get here if the client just explicitly
		 * destroyed the client.  Surely it no longer cares what
		 * error it gets back on an operation for the dead
		 * client.
		 */
		goto out;

	status = nfs_ok;
	nfsd4_client_record_create(cstate->session->se_client);
out:
	return status;
}
__be32
nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		  struct nfsd4_setclientid *setclid)
{
	struct xdr_netobj	clname = setclid->se_name;
	nfs4_verifier		clverifier = setclid->se_verf;
	struct nfs4_client	*conf, *new;
	struct nfs4_client	*unconf = NULL;
	__be32			status;
	struct nfsd_net		*nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	new = create_client(clname, rqstp, &clverifier);
	if (new == NULL)
		return nfserr_jukebox;
	/* Cases below refer to rfc 3530 section 14.2.33: */
	spin_lock(&nn->client_lock);
	conf = find_confirmed_client_by_name(&clname, nn);
	if (conf && client_has_state(conf)) {
		/* case 0: */
		status = nfserr_clid_inuse;
		if (clp_used_exchangeid(conf))
			goto out;
		if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
			char addr_str[INET6_ADDRSTRLEN];
			rpc_ntop((struct sockaddr *) &conf->cl_addr, addr_str,
				 sizeof(addr_str));
			dprintk("NFSD: setclientid: string in use by client "
				"at %s\n", addr_str);
			goto out;
		}
	}
	unconf = find_unconfirmed_client_by_name(&clname, nn);
	if (unconf)
		unhash_client_locked(unconf);
	if (conf && same_verf(&conf->cl_verifier, &clverifier)) {
		/* case 1: probable callback update */
		copy_clid(new, conf);
		gen_confirm(new, nn);
	} else /* case 4 (new client) or cases 2, 3 (client reboot): */
		gen_clid(new, nn);
	new->cl_minorversion = 0;
	gen_callback(new, setclid, rqstp);
	add_to_unconfirmed(new);
	setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
	setclid->se_clientid.cl_id = new->cl_clientid.cl_id;
	memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data));
	new = NULL;
	status = nfs_ok;
out:
	spin_unlock(&nn->client_lock);
	if (new)
		free_client(new);
	if (unconf)
		expire_client(unconf);
	return status;
}

__be32
nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
			  struct nfsd4_compound_state *cstate,
			  struct nfsd4_setclientid_confirm *setclientid_confirm)
{
	struct nfs4_client *conf, *unconf;
	struct nfs4_client *old = NULL;
	nfs4_verifier confirm = setclientid_confirm->sc_confirm;
	clientid_t * clid = &setclientid_confirm->sc_clientid;
	__be32 status;
	struct nfsd_net	*nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	if (STALE_CLIENTID(clid, nn))
		return nfserr_stale_clientid;

	spin_lock(&nn->client_lock);
	conf = find_confirmed_client(clid, false, nn);
	unconf = find_unconfirmed_client(clid, false, nn);
	/*
	 * We try hard to give out unique clientid's, so if we get an
	 * attempt to confirm the same clientid with a different cred,
	 * the client may be buggy; this should never happen.
	 *
	 * Nevertheless, RFC 7530 recommends INUSE for this case:
	 */
	status = nfserr_clid_inuse;
	if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred))
		goto out;
	if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred))
		goto out;
	/* cases below refer to rfc 3530 section 14.2.34: */
	if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) {
		if (conf && !unconf) /* case 2: probable retransmit */
			status = nfs_ok;
		else /* case 4: client hasn't noticed we rebooted yet? */
			status = nfserr_stale_clientid;
		goto out;
	}
	status = nfs_ok;
	if (conf) { /* case 1: callback update */
		old = unconf;
		unhash_client_locked(old);
		nfsd4_change_callback(conf, &unconf->cl_cb_conn);
	} else { /* case 3: normal case; new or rebooted client */
		old = find_confirmed_client_by_name(&unconf->cl_name, nn);
		if (old) {
			status = nfserr_clid_inuse;
			if (client_has_state(old)
					&& !same_creds(&unconf->cl_cred,
							&old->cl_cred))
				goto out;
			status = mark_client_expired_locked(old);
			if (status) {
				old = NULL;
				goto out;
			}
		}
		move_to_confirmed(unconf);
		conf = unconf;
	}
	get_client_locked(conf);
	spin_unlock(&nn->client_lock);
	nfsd4_probe_callback(conf);
	spin_lock(&nn->client_lock);
	put_client_renew_locked(conf);
out:
	spin_unlock(&nn->client_lock);
	if (old)
		expire_client(old);
	return status;
}
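/*
 * To summarize the rfc 3530 section 14.2.34 handling above: case 1 is
 * a callback update for an already-confirmed client, case 2 a probable
 * retransmit of a successful confirm, case 3 the normal confirmation
 * of a new or rebooted client, and case 4 a confirm from a client that
 * has not yet noticed a server reboot (answered with stale clientid).
 */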
static struct nfs4_file *nfsd4_alloc_file(void)
{
	return kmem_cache_alloc(file_slab, GFP_KERNEL);
}

/* OPEN Share state helper functions */
static void nfsd4_init_file(struct knfsd_fh *fh, unsigned int hashval,
				struct nfs4_file *fp)
{
	lockdep_assert_held(&state_lock);

	atomic_set(&fp->fi_ref, 1);
	spin_lock_init(&fp->fi_lock);
	INIT_LIST_HEAD(&fp->fi_stateids);
	INIT_LIST_HEAD(&fp->fi_delegations);
	INIT_LIST_HEAD(&fp->fi_clnt_odstate);
	fh_copy_shallow(&fp->fi_fhandle, fh);
	fp->fi_deleg_file = NULL;
	fp->fi_had_conflict = false;
	fp->fi_share_deny = 0;
	memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
	memset(fp->fi_access, 0, sizeof(fp->fi_access));
#ifdef CONFIG_NFSD_PNFS
	INIT_LIST_HEAD(&fp->fi_lo_states);
	atomic_set(&fp->fi_lo_recalls, 0);
#endif
	hlist_add_head_rcu(&fp->fi_hash, &file_hashtbl[hashval]);
}

void
nfsd4_free_slabs(void)
{
	kmem_cache_destroy(odstate_slab);
	kmem_cache_destroy(openowner_slab);
	kmem_cache_destroy(lockowner_slab);
	kmem_cache_destroy(file_slab);
	kmem_cache_destroy(stateid_slab);
	kmem_cache_destroy(deleg_slab);
}

int
nfsd4_init_slabs(void)
{
	openowner_slab = kmem_cache_create("nfsd4_openowners",
			sizeof(struct nfs4_openowner), 0, 0, NULL);
	if (openowner_slab == NULL)
		goto out;
	lockowner_slab = kmem_cache_create("nfsd4_lockowners",
			sizeof(struct nfs4_lockowner), 0, 0, NULL);
	if (lockowner_slab == NULL)
		goto out_free_openowner_slab;
	file_slab = kmem_cache_create("nfsd4_files",
			sizeof(struct nfs4_file), 0, 0, NULL);
	if (file_slab == NULL)
		goto out_free_lockowner_slab;
	stateid_slab = kmem_cache_create("nfsd4_stateids",
			sizeof(struct nfs4_ol_stateid), 0, 0, NULL);
	if (stateid_slab == NULL)
		goto out_free_file_slab;
	deleg_slab = kmem_cache_create("nfsd4_delegations",
			sizeof(struct nfs4_delegation), 0, 0, NULL);
	if (deleg_slab == NULL)
		goto out_free_stateid_slab;
	odstate_slab = kmem_cache_create("nfsd4_odstate",
			sizeof(struct nfs4_clnt_odstate), 0, 0, NULL);
	if (odstate_slab == NULL)
		goto out_free_deleg_slab;
	return 0;

out_free_deleg_slab:
	kmem_cache_destroy(deleg_slab);
out_free_stateid_slab:
	kmem_cache_destroy(stateid_slab);
out_free_file_slab:
	kmem_cache_destroy(file_slab);
out_free_lockowner_slab:
	kmem_cache_destroy(lockowner_slab);
out_free_openowner_slab:
	kmem_cache_destroy(openowner_slab);
out:
	dprintk("nfsd4: out of memory while initializing nfsv4\n");
	return -ENOMEM;
}
static void init_nfs4_replay(struct nfs4_replay *rp)
{
	rp->rp_status = nfserr_serverfault;
	rp->rp_buflen = 0;
	rp->rp_buf = rp->rp_ibuf;
	mutex_init(&rp->rp_mutex);
}

static void nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate,
		struct nfs4_stateowner *so)
{
	if (!nfsd4_has_session(cstate)) {
		mutex_lock(&so->so_replay.rp_mutex);
		cstate->replay_owner = nfs4_get_stateowner(so);
	}
}

void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate)
{
	struct nfs4_stateowner *so = cstate->replay_owner;

	if (so != NULL) {
		cstate->replay_owner = NULL;
		mutex_unlock(&so->so_replay.rp_mutex);
		nfs4_put_stateowner(so);
	}
}

static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp)
{
	struct nfs4_stateowner *sop;

	sop = kmem_cache_alloc(slab, GFP_KERNEL);
	if (!sop)
		return NULL;

	sop->so_owner.data = kmemdup(owner->data, owner->len, GFP_KERNEL);
	if (!sop->so_owner.data) {
		kmem_cache_free(slab, sop);
		return NULL;
	}
	sop->so_owner.len = owner->len;

	INIT_LIST_HEAD(&sop->so_stateids);
	sop->so_client = clp;
	init_nfs4_replay(&sop->so_replay);
	atomic_set(&sop->so_count, 1);
	return sop;
}
static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval)
{
	lockdep_assert_held(&clp->cl_lock);

	list_add(&oo->oo_owner.so_strhash,
		 &clp->cl_ownerstr_hashtbl[strhashval]);
	list_add(&oo->oo_perclient, &clp->cl_openowners);
}

static void nfs4_unhash_openowner(struct nfs4_stateowner *so)
{
	unhash_openowner_locked(openowner(so));
}

static void nfs4_free_openowner(struct nfs4_stateowner *so)
{
	struct nfs4_openowner *oo = openowner(so);

	kmem_cache_free(openowner_slab, oo);
}

static const struct nfs4_stateowner_operations openowner_ops = {
	.so_unhash =	nfs4_unhash_openowner,
	.so_free =	nfs4_free_openowner,
};

static struct nfs4_ol_stateid *
nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
{
	struct nfs4_ol_stateid *local, *ret = NULL;
	struct nfs4_openowner *oo = open->op_openowner;

	lockdep_assert_held(&fp->fi_lock);

	list_for_each_entry(local, &fp->fi_stateids, st_perfile) {
		/* ignore lock owners */
		if (local->st_stateowner->so_is_open_owner == 0)
			continue;
		if (local->st_stateowner != &oo->oo_owner)
			continue;
		if (local->st_stid.sc_type == NFS4_OPEN_STID) {
			ret = local;
			atomic_inc(&ret->st_stid.sc_count);
			break;
		}
	}
	return ret;
}

static __be32
nfsd4_verify_open_stid(struct nfs4_stid *s)
{
	__be32 ret = nfs_ok;

	switch (s->sc_type) {
	default:
		break;
	case NFS4_CLOSED_STID:
	case NFS4_CLOSED_DELEG_STID:
		ret = nfserr_bad_stateid;
		break;
	case NFS4_REVOKED_DELEG_STID:
		ret = nfserr_deleg_revoked;
	}
	return ret;
}

/* Lock the stateid st_mutex, and deal with races with CLOSE */
static __be32
nfsd4_lock_ol_stateid(struct nfs4_ol_stateid *stp)
{
	__be32 ret;

	mutex_lock(&stp->st_mutex);
	ret = nfsd4_verify_open_stid(&stp->st_stid);
	if (ret != nfs_ok)
		mutex_unlock(&stp->st_mutex);
	return ret;
}
static struct nfs4_ol_stateid *
nfsd4_find_and_lock_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
{
	struct nfs4_ol_stateid *stp;

	spin_lock(&fp->fi_lock);
	stp = nfsd4_find_existing_open(fp, open);
	spin_unlock(&fp->fi_lock);
	if (!stp || nfsd4_lock_ol_stateid(stp) == nfs_ok)
		goto out;
	nfs4_put_stid(&stp->st_stid);
	stp = NULL;
out:
	return stp;
}

static struct nfs4_openowner *
alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
			   struct nfsd4_compound_state *cstate)
{
	struct nfs4_client *clp = cstate->clp;
	struct nfs4_openowner *oo, *ret;

	oo = alloc_stateowner(openowner_slab, &open->op_owner, clp);
	if (!oo)
		return NULL;
	oo->oo_owner.so_ops = &openowner_ops;
	oo->oo_owner.so_is_open_owner = 1;
	oo->oo_owner.so_seqid = open->op_seqid;
	oo->oo_flags = 0;
	if (nfsd4_has_session(cstate))
		oo->oo_flags |= NFS4_OO_CONFIRMED;
	oo->oo_time = 0;
	oo->oo_last_closed_stid = NULL;
	INIT_LIST_HEAD(&oo->oo_close_lru);
	spin_lock(&clp->cl_lock);
	ret = find_openstateowner_str_locked(strhashval, open, clp);
	if (ret == NULL) {
		hash_openowner(oo, clp, strhashval);
		ret = oo;
	} else
		nfs4_free_stateowner(&oo->oo_owner);

	spin_unlock(&clp->cl_lock);
	return ret;
}

static struct nfs4_ol_stateid *
init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open)
{

	struct nfs4_openowner *oo = open->op_openowner;
	struct nfs4_ol_stateid *retstp = NULL;
	struct nfs4_ol_stateid *stp;

	stp = open->op_stp;
	/* We are moving these outside of the spinlocks to avoid the warnings */
	mutex_init(&stp->st_mutex);
	mutex_lock(&stp->st_mutex);

retry:
	spin_lock(&oo->oo_owner.so_client->cl_lock);
	spin_lock(&fp->fi_lock);

	retstp = nfsd4_find_existing_open(fp, open);
	if (retstp)
		goto out_unlock;

	open->op_stp = NULL;
	atomic_inc(&stp->st_stid.sc_count);
	stp->st_stid.sc_type = NFS4_OPEN_STID;
	INIT_LIST_HEAD(&stp->st_locks);
	stp->st_stateowner = nfs4_get_stateowner(&oo->oo_owner);
	get_nfs4_file(fp);
	stp->st_stid.sc_file = fp;
	stp->st_access_bmap = 0;
	stp->st_deny_bmap = 0;
	stp->st_openstp = NULL;
	list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
	list_add(&stp->st_perfile, &fp->fi_stateids);

out_unlock:
	spin_unlock(&fp->fi_lock);
	spin_unlock(&oo->oo_owner.so_client->cl_lock);
	if (retstp) {
		/* Handle races with CLOSE */
		if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
			nfs4_put_stid(&retstp->st_stid);
			goto retry;
		}
		/* To keep mutex tracking happy */
		mutex_unlock(&stp->st_mutex);
		stp = retstp;
	}
	return stp;
}
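/*
 * init_open_stateid() above uses the classic check/alloc/re-check
 * pattern: the caller's preallocated stateid is initialized outside
 * the spinlocks, and only hashed if no other OPEN raced us in.  If one
 * did, we take its stateid instead, retrying if that stateid is being
 * torn down by a concurrent CLOSE.
 */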
/*
 * In the 4.0 case we need to keep the owners around a little while to handle
 * CLOSE replay. We still do need to release any file access that is held by
 * them before returning however.
 */
static void
move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net)
{
	struct nfs4_ol_stateid *last;
	struct nfs4_openowner *oo = openowner(s->st_stateowner);
	struct nfsd_net *nn = net_generic(s->st_stid.sc_client->net,
						nfsd_net_id);

	dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo);

	/*
	 * We know that we hold one reference via nfsd4_close, and another
	 * "persistent" reference for the client. If the refcount is higher
	 * than 2, then there are still calls in progress that are using this
	 * stateid. We can't put the sc_file reference until they are finished.
	 * Wait for the refcount to drop to 2. Since it has been unhashed,
	 * there should be no danger of the refcount going back up again at
	 * this point.
	 */
	wait_event(close_wq, atomic_read(&s->st_stid.sc_count) == 2);

	release_all_access(s);
	if (s->st_stid.sc_file) {
		put_nfs4_file(s->st_stid.sc_file);
		s->st_stid.sc_file = NULL;
	}

	spin_lock(&nn->client_lock);
	last = oo->oo_last_closed_stid;
	oo->oo_last_closed_stid = s;
	list_move_tail(&oo->oo_close_lru, &nn->close_lru);
	oo->oo_time = get_seconds();
	spin_unlock(&nn->client_lock);
	if (last)
		nfs4_put_stid(&last->st_stid);
}

/* search file_hashtbl[] for file */
static struct nfs4_file *
find_file_locked(struct knfsd_fh *fh, unsigned int hashval)
{
	struct nfs4_file *fp;

	hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash) {
		if (fh_match(&fp->fi_fhandle, fh)) {
			if (atomic_inc_not_zero(&fp->fi_ref))
				return fp;
		}
	}
	return NULL;
}

static struct nfs4_file *
find_file(struct knfsd_fh *fh)
{
	struct nfs4_file *fp;
	unsigned int hashval = file_hashval(fh);

	rcu_read_lock();
	fp = find_file_locked(fh, hashval);
	rcu_read_unlock();
	return fp;
}

static struct nfs4_file *
find_or_add_file(struct nfs4_file *new, struct knfsd_fh *fh)
{
	struct nfs4_file *fp;
	unsigned int hashval = file_hashval(fh);

	rcu_read_lock();
	fp = find_file_locked(fh, hashval);
	rcu_read_unlock();
	if (fp)
		return fp;

	spin_lock(&state_lock);
	fp = find_file_locked(fh, hashval);
	if (likely(fp == NULL)) {
		nfsd4_init_file(fh, hashval, new);
		fp = new;
	}
	spin_unlock(&state_lock);

	return fp;
}
/*
 * Called to check deny when READ with all zero stateid or
 * WRITE with all zero or all one stateid
 */
static __be32
nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
{
	struct nfs4_file *fp;
	__be32 ret = nfs_ok;

	fp = find_file(&current_fh->fh_handle);
	if (!fp)
		return ret;
	/* Check for conflicting share reservations */
	spin_lock(&fp->fi_lock);
	if (fp->fi_share_deny & deny_type)
		ret = nfserr_locked;
	spin_unlock(&fp->fi_lock);
	put_nfs4_file(fp);
	return ret;
}

static void nfsd4_cb_recall_prepare(struct nfsd4_callback *cb)
{
	struct nfs4_delegation *dp = cb_to_delegation(cb);
	struct nfsd_net *nn = net_generic(dp->dl_stid.sc_client->net,
					  nfsd_net_id);

	block_delegations(&dp->dl_stid.sc_file->fi_fhandle);

	/*
	 * We can't do this in nfsd_break_deleg_cb because it is
	 * already holding inode->i_lock.
	 *
	 * If the dl_time != 0, then we know that it has already been
	 * queued for a lease break. Don't queue it again.
	 */
	spin_lock(&state_lock);
	if (dp->dl_time == 0) {
		dp->dl_time = get_seconds();
		list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);
	}
	spin_unlock(&state_lock);
}

static int nfsd4_cb_recall_done(struct nfsd4_callback *cb,
		struct rpc_task *task)
{
	struct nfs4_delegation *dp = cb_to_delegation(cb);

	if (dp->dl_stid.sc_type == NFS4_CLOSED_DELEG_STID)
		return 1;

	switch (task->tk_status) {
	case 0:
		return 1;
	case -NFS4ERR_BAD_STATEID:
		/*
		 * Race: client probably got cb_recall before open reply
		 * granting delegation.
		 */
		if (dp->dl_retries--) {
			rpc_delay(task, 2 * HZ);
			return 0;
		}
		/*FALLTHRU*/
	default:
		return -1;
	}
}

static void nfsd4_cb_recall_release(struct nfsd4_callback *cb)
{
	struct nfs4_delegation *dp = cb_to_delegation(cb);

	nfs4_put_stid(&dp->dl_stid);
}

static struct nfsd4_callback_ops nfsd4_cb_recall_ops = {
	.prepare	= nfsd4_cb_recall_prepare,
	.done		= nfsd4_cb_recall_done,
	.release	= nfsd4_cb_recall_release,
};

static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
{
	/*
	 * We're assuming the state code never drops its reference
	 * without first removing the lease.  Since we're in this lease
	 * callback (and since the lease code is serialized by the kernel
	 * lock) we know the server hasn't removed the lease yet, we know
	 * it's safe to take a reference.
	 */
	atomic_inc(&dp->dl_stid.sc_count);
	nfsd4_run_cb(&dp->dl_recall);
}

/* Called from break_lease() with i_lock held. */
static bool
nfsd_break_deleg_cb(struct file_lock *fl)
{
	bool ret = false;
	struct nfs4_file *fp = (struct nfs4_file *)fl->fl_owner;
	struct nfs4_delegation *dp;

	if (!fp) {
		WARN(1, "(%p)->fl_owner NULL\n", fl);
		return ret;
	}
	if (fp->fi_had_conflict) {
		WARN(1, "duplicate break on %p\n", fp);
		return ret;
	}
	/*
	 * We don't want the locks code to timeout the lease for us;
	 * we'll remove it ourself if a delegation isn't returned
	 * in time:
	 */
	fl->fl_break_time = 0;

	spin_lock(&fp->fi_lock);
	fp->fi_had_conflict = true;
	/*
	 * If there are no delegations on the list, then return true
	 * so that the lease code will go ahead and delete it.
	 */
	if (list_empty(&fp->fi_delegations))
		ret = true;
	else
		list_for_each_entry(dp, &fp->fi_delegations, dl_perfile)
			nfsd_break_one_deleg(dp);
	spin_unlock(&fp->fi_lock);
	return ret;
}

static int
nfsd_change_deleg_cb(struct file_lock *onlist, int arg,
		     struct list_head *dispose)
{
	if (arg & F_UNLCK)
		return lease_modify(onlist, arg, dispose);
	else
		return -EAGAIN;
}

static const struct lock_manager_operations nfsd_lease_mng_ops = {
	.lm_break = nfsd_break_deleg_cb,
	.lm_change = nfsd_change_deleg_cb,
};
static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid)
{
	if (nfsd4_has_session(cstate))
		return nfs_ok;
	if (seqid == so->so_seqid - 1)
		return nfserr_replay_me;
	if (seqid == so->so_seqid)
		return nfs_ok;
	return nfserr_bad_seqid;
}
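/*
 * Example for nfsd4_check_seqid(): with so_seqid at 7, a v4.0 request
 * carrying seqid 6 is a replay of the owner's last operation (answered
 * from the replay buffer), seqid 7 is the next operation in order, and
 * anything else is a bad seqid.  Sessions make this per-owner ordering
 * unnecessary, hence the early nfs_ok.
 */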
static __be32 lookup_clientid(clientid_t *clid,
		struct nfsd4_compound_state *cstate,
		struct nfsd_net *nn)
{
	struct nfs4_client *found;

	if (cstate->clp) {
		found = cstate->clp;
		if (!same_clid(&found->cl_clientid, clid))
			return nfserr_stale_clientid;
		return nfs_ok;
	}

	if (STALE_CLIENTID(clid, nn))
		return nfserr_stale_clientid;

	/*
	 * For v4.1+ we get the client in the SEQUENCE op. If we don't have one
	 * cached already then we know this is for is for v4.0 and "sessions"
	 * will be false.
	 */
	WARN_ON_ONCE(cstate->session);
	spin_lock(&nn->client_lock);
	found = find_confirmed_client(clid, false, nn);
	if (!found) {
		spin_unlock(&nn->client_lock);
		return nfserr_expired;
	}
	atomic_inc(&found->cl_refcount);
	spin_unlock(&nn->client_lock);

	/* Cache the nfs4_client in cstate! */
	cstate->clp = found;
	return nfs_ok;
}

__be32
nfsd4_process_open1(struct nfsd4_compound_state *cstate,
		    struct nfsd4_open *open, struct nfsd_net *nn)
{
	clientid_t *clientid = &open->op_clientid;
	struct nfs4_client *clp = NULL;
	unsigned int strhashval;
	struct nfs4_openowner *oo = NULL;
	__be32 status;

	if (STALE_CLIENTID(&open->op_clientid, nn))
		return nfserr_stale_clientid;
	/*
	 * In case we need it later, after we've already created the
	 * file and don't want to risk a further failure:
	 */
	open->op_file = nfsd4_alloc_file();
	if (open->op_file == NULL)
		return nfserr_jukebox;

	status = lookup_clientid(clientid, cstate, nn);
	if (status)
		return status;
	clp = cstate->clp;

	strhashval = ownerstr_hashval(&open->op_owner);
	oo = find_openstateowner_str(strhashval, open, clp);
	open->op_openowner = oo;
	if (!oo)
		goto new_owner;
	if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
		/* Replace unconfirmed owners without checking for replay. */
		release_openowner(oo);
		open->op_openowner = NULL;
		goto new_owner;
	}
	status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid);
	if (status)
		return status;
	goto alloc_stateid;
new_owner:
	oo = alloc_init_open_stateowner(strhashval, open, cstate);
	if (oo == NULL)
		return nfserr_jukebox;
	open->op_openowner = oo;
alloc_stateid:
	open->op_stp = nfs4_alloc_open_stateid(clp);
	if (!open->op_stp)
		return nfserr_jukebox;

	if (nfsd4_has_session(cstate) &&
	    (cstate->current_fh.fh_export->ex_flags & NFSEXP_PNFS)) {
		open->op_odstate = alloc_clnt_odstate(clp);
		if (!open->op_odstate)
			return nfserr_jukebox;
	}

	return nfs_ok;
}
static inline __be32
nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
{
	if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ))
		return nfserr_openmode;
	else
		return nfs_ok;
}

static int share_access_to_flags(u32 share_access)
{
	return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE;
}

static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, stateid_t *s)
{
	struct nfs4_stid *ret;

	ret = find_stateid_by_type(cl, s,
				NFS4_DELEG_STID|NFS4_REVOKED_DELEG_STID);
	if (!ret)
		return NULL;
	return delegstateid(ret);
}

static bool nfsd4_is_deleg_cur(struct nfsd4_open *open)
{
	return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR ||
	       open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH;
}

static __be32
nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open,
		struct nfs4_delegation **dp)
{
	int flags;
	__be32 status = nfserr_bad_stateid;
	struct nfs4_delegation *deleg;

	deleg = find_deleg_stateid(cl, &open->op_delegate_stateid);
	if (deleg == NULL)
		goto out;
	if (deleg->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID) {
		nfs4_put_stid(&deleg->dl_stid);
		if (cl->cl_minorversion)
			status = nfserr_deleg_revoked;
		goto out;
	}
	flags = share_access_to_flags(open->op_share_access);
	status = nfs4_check_delegmode(deleg, flags);
	if (status) {
		nfs4_put_stid(&deleg->dl_stid);
		goto out;
	}
	*dp = deleg;
out:
	if (!nfsd4_is_deleg_cur(open))
		return nfs_ok;
	if (status)
		return status;
	open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
	return nfs_ok;
}

static inline int nfs4_access_to_access(u32 nfs4_access)
{
	int flags = 0;

	if (nfs4_access & NFS4_SHARE_ACCESS_READ)
		flags |= NFSD_MAY_READ;
	if (nfs4_access & NFS4_SHARE_ACCESS_WRITE)
		flags |= NFSD_MAY_WRITE;
	return flags;
}

static inline __be32
nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
		struct nfsd4_open *open)
{
	struct iattr iattr = {
		.ia_valid = ATTR_SIZE,
		.ia_size = 0,
	};
	if (!open->op_truncate)
		return 0;
	if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
		return nfserr_inval;
	return nfsd_setattr(rqstp, fh, &iattr, 0, (time_t)0);
}
static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
		struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp,
		struct nfsd4_open *open)
{
	struct file *filp = NULL;
	__be32 status;
	int oflag = nfs4_access_to_omode(open->op_share_access);
	int access = nfs4_access_to_access(open->op_share_access);
	unsigned char old_access_bmap, old_deny_bmap;

	spin_lock(&fp->fi_lock);

	/*
	 * Are we trying to set a deny mode that would conflict with
	 * current access?
	 */
	status = nfs4_file_check_deny(fp, open->op_share_deny);
	if (status != nfs_ok) {
		spin_unlock(&fp->fi_lock);
		goto out;
	}

	/* set access to the file */
	status = nfs4_file_get_access(fp, open->op_share_access);
	if (status != nfs_ok) {
		spin_unlock(&fp->fi_lock);
		goto out;
	}

	/* Set access bits in stateid */
	old_access_bmap = stp->st_access_bmap;
	set_access(open->op_share_access, stp);

	/* Set new deny mask */
	old_deny_bmap = stp->st_deny_bmap;
	set_deny(open->op_share_deny, stp);
	fp->fi_share_deny |= (open->op_share_deny & NFS4_SHARE_DENY_BOTH);

	if (!fp->fi_fds[oflag]) {
		spin_unlock(&fp->fi_lock);
		status = nfsd_open(rqstp, cur_fh, S_IFREG, access, &filp);
		if (status)
			goto out_put_access;
		spin_lock(&fp->fi_lock);
		if (!fp->fi_fds[oflag]) {
			fp->fi_fds[oflag] = filp;
			filp = NULL;
		}
	}
	spin_unlock(&fp->fi_lock);
	if (filp)
		fput(filp);

	status = nfsd4_truncate(rqstp, cur_fh, open);
	if (status)
		goto out_put_access;
out:
	return status;
out_put_access:
	stp->st_access_bmap = old_access_bmap;
	nfs4_file_put_access(fp, open->op_share_access);
	reset_union_bmap_deny(bmap_to_share_mode(old_deny_bmap), stp);
	goto out;
}

static __be32
nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, struct nfsd4_open *open)
{
	__be32 status;
	unsigned char old_deny_bmap = stp->st_deny_bmap;

	if (!test_access(open->op_share_access, stp))
		return nfs4_get_vfs_file(rqstp, fp, cur_fh, stp, open);

	/* test and set deny mode */
	spin_lock(&fp->fi_lock);
	status = nfs4_file_check_deny(fp, open->op_share_deny);
	if (status == nfs_ok) {
		set_deny(open->op_share_deny, stp);
		fp->fi_share_deny |=
				(open->op_share_deny & NFS4_SHARE_DENY_BOTH);
	}
	spin_unlock(&fp->fi_lock);

	if (status != nfs_ok)
		return status;

	status = nfsd4_truncate(rqstp, cur_fh, open);
	if (status != nfs_ok)
		reset_union_bmap_deny(old_deny_bmap, stp);
	return status;
}
/* Should we give out recallable state?: */
static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
{
	if (clp->cl_cb_state == NFSD4_CB_UP)
		return true;
	/*
	 * In the sessions case, since we don't have to establish a
	 * separate connection for callbacks, we assume it's OK
	 * until we hear otherwise:
	 */
	return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
}

static struct file_lock *nfs4_alloc_init_lease(struct nfs4_file *fp, int flag)
{
	struct file_lock *fl;

	fl = locks_alloc_lock();
	if (!fl)
		return NULL;
	fl->fl_lmops = &nfsd_lease_mng_ops;
	fl->fl_flags = FL_DELEG;
	fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ ? F_RDLCK : F_WRLCK;
	fl->fl_end = OFFSET_MAX;
	fl->fl_owner = (fl_owner_t)fp;
	fl->fl_pid = current->tgid;
	return fl;
}

/**
 * nfs4_setlease - Obtain a delegation by requesting lease from vfs layer
 * @dp:   a pointer to the nfs4_delegation we're adding.
 *
 * Return:
 *      On success: Return code will be 0 on success.
 *
 *      On error: -EAGAIN if there was an existing delegation.
 *                 nonzero if there is an error in other cases.
 *
 */

static int nfs4_setlease(struct nfs4_delegation *dp)
{
	struct nfs4_file *fp = dp->dl_stid.sc_file;
	struct file_lock *fl;
	struct file *filp;
	int status = 0;

	fl = nfs4_alloc_init_lease(fp, NFS4_OPEN_DELEGATE_READ);
	if (!fl)
		return -ENOMEM;
	filp = find_readable_file(fp);
	if (!filp) {
		/* We should always have a readable file here */
		WARN_ON_ONCE(1);
		locks_free_lock(fl);
		return -EBADF;
	}
	fl->fl_file = filp;
	status = vfs_setlease(filp, fl->fl_type, &fl, NULL);
	if (fl)
		locks_free_lock(fl);
	if (status)
		goto out_fput;
	spin_lock(&state_lock);
	spin_lock(&fp->fi_lock);
	/* Did the lease get broken before we took the lock? */
	status = -EAGAIN;
	if (fp->fi_had_conflict)
		goto out_unlock;
	/* Race breaker */
	if (fp->fi_deleg_file) {
		status = hash_delegation_locked(dp, fp);
		goto out_unlock;
	}
	fp->fi_deleg_file = filp;
	fp->fi_delegees = 0;
	status = hash_delegation_locked(dp, fp);
	spin_unlock(&fp->fi_lock);
	spin_unlock(&state_lock);
	if (status) {
		/* Should never happen, this is a new fi_deleg_file  */
		WARN_ON_ONCE(1);
		goto out_fput;
	}
	return 0;
out_unlock:
	spin_unlock(&fp->fi_lock);
	spin_unlock(&state_lock);
out_fput:
	fput(filp);
	return status;
}
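/*
 * Note the lock ordering in nfs4_setlease(): state_lock is always
 * taken before fi_lock.  The lease is requested from the vfs *before*
 * the locks are taken, so fi_had_conflict and fi_deleg_file must be
 * re-checked afterwards to catch a lease that was broken, or a
 * delegation that was installed, while we were unlocked.
 */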
static struct nfs4_delegation *
nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
		    struct nfs4_file *fp, struct nfs4_clnt_odstate *odstate)
{
	int status = 0;
	struct nfs4_delegation *dp;

	if (fp->fi_had_conflict)
		return ERR_PTR(-EAGAIN);

	spin_lock(&state_lock);
	spin_lock(&fp->fi_lock);
	status = nfs4_get_existing_delegation(clp, fp);
	spin_unlock(&fp->fi_lock);
	spin_unlock(&state_lock);

	if (status)
		return ERR_PTR(status);

	dp = alloc_init_deleg(clp, fh, odstate);
	if (!dp)
		return ERR_PTR(-ENOMEM);

	get_nfs4_file(fp);
	spin_lock(&state_lock);
	spin_lock(&fp->fi_lock);
	dp->dl_stid.sc_file = fp;
	if (!fp->fi_deleg_file) {
		spin_unlock(&fp->fi_lock);
		spin_unlock(&state_lock);
		status = nfs4_setlease(dp);
		goto out;
	}
	if (fp->fi_had_conflict) {
		status = -EAGAIN;
		goto out_unlock;
	}
	status = hash_delegation_locked(dp, fp);
out_unlock:
	spin_unlock(&fp->fi_lock);
	spin_unlock(&state_lock);
out:
	if (status) {
		put_clnt_odstate(dp->dl_clnt_odstate);
		nfs4_put_stid(&dp->dl_stid);
		return ERR_PTR(status);
	}
	return dp;
}

static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
{
	open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
	if (status == -EAGAIN)
		open->op_why_no_deleg = WND4_CONTENTION;
	else {
		open->op_why_no_deleg = WND4_RESOURCE;
		switch (open->op_deleg_want) {
		case NFS4_SHARE_WANT_READ_DELEG:
		case NFS4_SHARE_WANT_WRITE_DELEG:
		case NFS4_SHARE_WANT_ANY_DELEG:
			break;
		case NFS4_SHARE_WANT_CANCEL:
			open->op_why_no_deleg = WND4_CANCELLED;
			break;
		case NFS4_SHARE_WANT_NO_DELEG:
			WARN_ON_ONCE(1);
		}
	}
}

/*
 * Attempt to hand out a delegation.
 *
 * Note we don't support write delegations, and won't until the vfs has
 * proper support for them.
 */
static void
nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open,
			struct nfs4_ol_stateid *stp)
{
	struct nfs4_delegation *dp;
	struct nfs4_openowner *oo = openowner(stp->st_stateowner);
	struct nfs4_client *clp = stp->st_stid.sc_client;
	int cb_up;
	int status = 0;

	cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
	open->op_recall = 0;
	switch (open->op_claim_type) {
	case NFS4_OPEN_CLAIM_PREVIOUS:
		if (!cb_up)
			open->op_recall = 1;
		if (open->op_delegate_type != NFS4_OPEN_DELEGATE_READ)
			goto out_no_deleg;
		break;
	case NFS4_OPEN_CLAIM_NULL:
	case NFS4_OPEN_CLAIM_FH:
		/*
		 * Let's not give out any delegations till everyone's
		 * had the chance to reclaim theirs, *and* until
		 * NLM locks have all been reclaimed:
		 */
		if (locks_in_grace(clp->net))
			goto out_no_deleg;
		if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED))
			goto out_no_deleg;
		/*
		 * Also, if the file was opened for write or
		 * create, there's a good chance the client's
		 * about to write to it, resulting in an
		 * immediate recall (since we don't support
		 * write delegations):
		 */
		if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE)
			goto out_no_deleg;
		if (open->op_create == NFS4_OPEN_CREATE)
			goto out_no_deleg;
		break;
	default:
		goto out_no_deleg;
	}
	dp = nfs4_set_delegation(clp, fh, stp->st_stid.sc_file, stp->st_clnt_odstate);
	if (IS_ERR(dp))
		goto out_no_deleg;

	memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));

	dprintk("NFSD: delegation stateid=" STATEID_FMT "\n",
		STATEID_VAL(&dp->dl_stid.sc_stateid));
	open->op_delegate_type = NFS4_OPEN_DELEGATE_READ;
	nfs4_put_stid(&dp->dl_stid);
	return;
out_no_deleg:
	open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE;
	if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS &&
	    open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) {
		dprintk("NFSD: WARNING: refusing delegation reclaim\n");
		open->op_recall = 1;
	}

	/* 4.1 client asking for a delegation? */
	if (open->op_deleg_want)
		nfsd4_open_deleg_none_ext(open, status);
	return;
}

static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open,
					struct nfs4_delegation *dp)
{
	if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG &&
	    dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
		open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
		open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE;
	} else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG &&
		   dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
		open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
		open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE;
	}
	/* Otherwise the client must be confused wanting a delegation
	 * it already has, therefore we don't return
	 * NFS4_OPEN_DELEGATE_NONE_EXT and reason.
	 */
}
__be32
nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
{
	struct nfsd4_compoundres *resp = rqstp->rq_resp;
	struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
	struct nfs4_file *fp = NULL;
	struct nfs4_ol_stateid *stp = NULL;
	struct nfs4_delegation *dp = NULL;
	__be32 status;
	bool new_stp = false;

	/*
	 * Lookup file; if found, lookup stateid and check open request,
	 * and check for delegations in the process of being recalled.
	 * If not found, create the nfs4_file struct
	 */
	fp = find_or_add_file(open->op_file, &current_fh->fh_handle);
	if (fp != open->op_file) {
		status = nfs4_check_deleg(cl, open, &dp);
		if (status)
			goto out;
		stp = nfsd4_find_and_lock_existing_open(fp, open);
	} else {
		open->op_file = NULL;
		status = nfserr_bad_stateid;
		if (nfsd4_is_deleg_cur(open))
			goto out;
	}

	if (!stp) {
		stp = init_open_stateid(fp, open);
		if (!open->op_stp)
			new_stp = true;
	}

	/*
	 * OPEN the file, or upgrade an existing OPEN.
	 * If truncate fails, the OPEN fails.
	 *
	 * stp is already locked.
	 */
	if (!new_stp) {
		/* Stateid was found, this is an OPEN upgrade */
		status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
		if (status) {
			mutex_unlock(&stp->st_mutex);
			goto out;
		}
	} else {
		status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
		if (status) {
			stp->st_stid.sc_type = NFS4_CLOSED_STID;
			release_open_stateid(stp);
			mutex_unlock(&stp->st_mutex);
			goto out;
		}

		stp->st_clnt_odstate = find_or_hash_clnt_odstate(fp,
							open->op_odstate);
		if (stp->st_clnt_odstate == open->op_odstate)
			open->op_odstate = NULL;
	}

	nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid);
	mutex_unlock(&stp->st_mutex);

	if (nfsd4_has_session(&resp->cstate)) {
		if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
			open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
			open->op_why_no_deleg = WND4_NOT_WANTED;
			goto nodeleg;
		}
	}

	/*
	 * Attempt to hand out a delegation. No error return, because the
	 * OPEN succeeds even if we fail.
	 */
	nfs4_open_delegation(current_fh, open, stp);
nodeleg:
	status = nfs_ok;

	dprintk("%s: stateid=" STATEID_FMT "\n", __func__,
		STATEID_VAL(&stp->st_stid.sc_stateid));
out:
	/* 4.1 client trying to upgrade/downgrade delegation? */
	if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp &&
	    open->op_deleg_want)
		nfsd4_deleg_xgrade_none_ext(open, dp);

	if (fp)
		put_nfs4_file(fp);
	if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
		open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
	/*
	 * To finish the open response, we just need to set the rflags.
	 */
	open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX;
	if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED) &&
	    !nfsd4_has_session(&resp->cstate))
		open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM;
	if (dp)
		nfs4_put_stid(&dp->dl_stid);
	if (stp)
		nfs4_put_stid(&stp->st_stid);

	return status;
}

void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate,
			      struct nfsd4_open *open)
{
	if (open->op_openowner) {
		struct nfs4_stateowner *so = &open->op_openowner->oo_owner;

		nfsd4_cstate_assign_replay(cstate, so);
		nfs4_put_stateowner(so);
	}
	if (open->op_file)
		kmem_cache_free(file_slab, open->op_file);
	if (open->op_stp)
		nfs4_put_stid(&open->op_stp->st_stid);
	if (open->op_odstate)
		kmem_cache_free(odstate_slab, open->op_odstate);
}
__be32
nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	    clientid_t *clid)
{
	struct nfs4_client *clp;
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	dprintk("process_renew(%08x/%08x): starting\n",
			clid->cl_boot, clid->cl_id);
	status = lookup_clientid(clid, cstate, nn);
	if (status)
		goto out;
	clp = cstate->clp;
	status = nfserr_cb_path_down;
	if (!list_empty(&clp->cl_delegations)
			&& clp->cl_cb_state != NFSD4_CB_UP)
		goto out;
	status = nfs_ok;
out:
	return status;
}

void
nfsd4_end_grace(struct nfsd_net *nn)
{
	/* do nothing if grace period already ended */
	if (nn->grace_ended)
		return;

	dprintk("NFSD: end of grace period\n");
	nn->grace_ended = true;
	/*
	 * If the server goes down again right now, an NFSv4
	 * client will still be allowed to reclaim after it comes back up,
	 * even if it hasn't yet had a chance to reclaim state this time.
	 */
	nfsd4_record_grace_done(nn);
	/*
	 * At this point, NFSv4 clients can still reclaim.  But if the
	 * server crashes, any that have not yet reclaimed will be out
	 * of luck on the next boot.
	 *
	 * (NFSv4.1+ clients are considered to have reclaimed once they
	 * call RECLAIM_COMPLETE.  NFSv4.0 clients are considered to
	 * have reclaimed after their first OPEN.)
	 */
	locks_end_grace(&nn->nfsd4_manager);
	/*
	 * At this point, and once lockd and/or any other containers
	 * exit their grace period, further reclaims will fail and
	 * regular locking can resume.
	 */
}
static time_t
nfs4_laundromat(struct nfsd_net *nn)
{
	struct nfs4_client *clp;
	struct nfs4_openowner *oo;
	struct nfs4_delegation *dp;
	struct nfs4_ol_stateid *stp;
	struct list_head *pos, *next, reaplist;
	time_t cutoff = get_seconds() - nn->nfsd4_lease;
	time_t t, new_timeo = nn->nfsd4_lease;

	dprintk("NFSD: laundromat service - starting\n");
	nfsd4_end_grace(nn);
	INIT_LIST_HEAD(&reaplist);
	spin_lock(&nn->client_lock);
	list_for_each_safe(pos, next, &nn->client_lru) {
		clp = list_entry(pos, struct nfs4_client, cl_lru);
		if (time_after((unsigned long)clp->cl_time, (unsigned long)cutoff)) {
			t = clp->cl_time - cutoff;
			new_timeo = min(new_timeo, t);
			break;
		}
		if (mark_client_expired_locked(clp)) {
			dprintk("NFSD: client in use (clientid %08x)\n",
				clp->cl_clientid.cl_id);
			continue;
		}
		list_add(&clp->cl_lru, &reaplist);
	}
	spin_unlock(&nn->client_lock);
	list_for_each_safe(pos, next, &reaplist) {
		clp = list_entry(pos, struct nfs4_client, cl_lru);
		dprintk("NFSD: purging unused client (clientid %08x)\n",
			clp->cl_clientid.cl_id);
		list_del_init(&clp->cl_lru);
		expire_client(clp);
	}
	spin_lock(&state_lock);
	list_for_each_safe(pos, next, &nn->del_recall_lru) {
		dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
		if (time_after((unsigned long)dp->dl_time, (unsigned long)cutoff)) {
			t = dp->dl_time - cutoff;
			new_timeo = min(new_timeo, t);
			break;
		}
		WARN_ON(!unhash_delegation_locked(dp));
		list_add(&dp->dl_recall_lru, &reaplist);
	}
	spin_unlock(&state_lock);
	while (!list_empty(&reaplist)) {
		dp = list_first_entry(&reaplist, struct nfs4_delegation,
					dl_recall_lru);
		list_del_init(&dp->dl_recall_lru);
		revoke_delegation(dp);
	}

	spin_lock(&nn->client_lock);
	while (!list_empty(&nn->close_lru)) {
		oo = list_first_entry(&nn->close_lru, struct nfs4_openowner,
					oo_close_lru);
		if (time_after((unsigned long)oo->oo_time,
			       (unsigned long)cutoff)) {
			t = oo->oo_time - cutoff;
			new_timeo = min(new_timeo, t);
			break;
		}
		list_del_init(&oo->oo_close_lru);
		stp = oo->oo_last_closed_stid;
		oo->oo_last_closed_stid = NULL;
		spin_unlock(&nn->client_lock);
		nfs4_put_stid(&stp->st_stid);
		spin_lock(&nn->client_lock);
	}
	spin_unlock(&nn->client_lock);

	new_timeo = max_t(time_t, new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT);
	return new_timeo;
}
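
/*
 * Worked example of the rescheduling above (hypothetical values): with
 * nfsd4_lease = 90s and a client that last renewed 60 seconds ago,
 * cutoff = now - 90, so clp->cl_time - cutoff = 30.  new_timeo drops to
 * 30 and the laundromat re-runs just as that client's lease would
 * expire, instead of waiting out a full lease period.
 */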
static struct workqueue_struct *laundry_wq;
static void laundromat_main(struct work_struct *);

static void
laundromat_main(struct work_struct *laundry)
{
	time_t t;
	struct delayed_work *dwork = container_of(laundry, struct delayed_work,
						  work);
	struct nfsd_net *nn = container_of(dwork, struct nfsd_net,
					   laundromat_work);

	t = nfs4_laundromat(nn);
	dprintk("NFSD: laundromat_main - sleeping for %ld seconds\n", t);
	queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
}
static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp)
{
	if (!fh_match(&fhp->fh_handle, &stp->sc_file->fi_fhandle))
		return nfserr_bad_stateid;
	return nfs_ok;
}
static inline int
access_permit_read(struct nfs4_ol_stateid *stp)
{
	return test_access(NFS4_SHARE_ACCESS_READ, stp) ||
		test_access(NFS4_SHARE_ACCESS_BOTH, stp) ||
		test_access(NFS4_SHARE_ACCESS_WRITE, stp);
}

static inline int
access_permit_write(struct nfs4_ol_stateid *stp)
{
	return test_access(NFS4_SHARE_ACCESS_WRITE, stp) ||
		test_access(NFS4_SHARE_ACCESS_BOTH, stp);
}
static
__be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
{
	__be32 status = nfserr_openmode;

	/* For lock stateid's, we test the parent open, not the lock: */
	if (stp->st_openstp)
		stp = stp->st_openstp;
	if ((flags & WR_STATE) && !access_permit_write(stp))
		goto out;
	if ((flags & RD_STATE) && !access_permit_read(stp))
		goto out;
	status = nfs_ok;
out:
	return status;
}
static inline __be32
check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid, int flags)
{
	if (ONE_STATEID(stateid) && (flags & RD_STATE))
		return nfs_ok;
	else if (opens_in_grace(net)) {
		/* Answer in remaining cases depends on existence of
		 * conflicting state; so we must wait out the grace period. */
		return nfserr_grace;
	} else if (flags & WR_STATE)
		return nfs4_share_conflict(current_fh,
				NFS4_SHARE_DENY_WRITE);
	else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */
		return nfs4_share_conflict(current_fh,
				NFS4_SHARE_DENY_READ);
}
/*
 * Allow READ/WRITE during grace period on recovered state only for files
 * that are not able to provide mandatory locking.
 */
static inline int
grace_disallows_io(struct net *net, struct inode *inode)
{
	return opens_in_grace(net) && mandatory_lock(inode);
}
/* Returns true iff a is later than b: */
static bool stateid_generation_after(stateid_t *a, stateid_t *b)
{
	return (s32)(a->si_generation - b->si_generation) > 0;
}

static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
{
	/*
	 * When sessions are used the stateid generation number is ignored
	 * when it is zero.
	 */
	if (has_session && in->si_generation == 0)
		return nfs_ok;

	if (in->si_generation == ref->si_generation)
		return nfs_ok;

	/* If the client sends us a stateid from the future, it's buggy: */
	if (stateid_generation_after(in, ref))
		return nfserr_bad_stateid;
	/*
	 * However, we could see a stateid from the past, even from a
	 * non-buggy client. For example, if the client sends a lock
	 * while some IO is outstanding, the lock may bump si_generation
	 * while the IO is still in flight. The client could avoid that
	 * situation by waiting for responses on all the IO requests,
	 * but better performance may result in retrying IO that
	 * receives an old_stateid error if requests are rarely
	 * reordered in flight:
	 */
	return nfserr_old_stateid;
}
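
/*
 * Example of the wraparound-safe comparison above (hypothetical values):
 * if in->si_generation == 1 and ref->si_generation == 0xffffffff, then
 * (s32)(1 - 0xffffffff) == 2 > 0, so "in" is correctly treated as later
 * than "ref" even though the u32 counter has wrapped.
 */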
static __be32 nfsd4_check_openowner_confirmed(struct nfs4_ol_stateid *ols)
{
	if (ols->st_stateowner->so_is_open_owner &&
	    !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
		return nfserr_bad_stateid;
	return nfs_ok;
}
static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
{
	struct nfs4_stid *s;
	__be32 status = nfserr_bad_stateid;

	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
		CLOSE_STATEID(stateid))
		return status;
	/* Client debugging aid. */
	if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) {
		char addr_str[INET6_ADDRSTRLEN];
		rpc_ntop((struct sockaddr *)&cl->cl_addr, addr_str,
				sizeof(addr_str));
		pr_warn_ratelimited("NFSD: client %s testing state ID "
					"with incorrect client ID\n", addr_str);
		return status;
	}
	spin_lock(&cl->cl_lock);
	s = find_stateid_locked(cl, stateid);
	if (!s)
		goto out_unlock;
	status = check_stateid_generation(stateid, &s->sc_stateid, 1);
	if (status)
		goto out_unlock;
	switch (s->sc_type) {
	case NFS4_DELEG_STID:
		status = nfs_ok;
		break;
	case NFS4_REVOKED_DELEG_STID:
		status = nfserr_deleg_revoked;
		break;
	case NFS4_OPEN_STID:
	case NFS4_LOCK_STID:
		status = nfsd4_check_openowner_confirmed(openlockstateid(s));
		break;
	default:
		printk("unknown stateid type %x\n", s->sc_type);
		/* Fallthrough */
	case NFS4_CLOSED_STID:
	case NFS4_CLOSED_DELEG_STID:
		status = nfserr_bad_stateid;
	}
out_unlock:
	spin_unlock(&cl->cl_lock);
	return status;
}
__be32
nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
		     stateid_t *stateid, unsigned char typemask,
		     struct nfs4_stid **s, struct nfsd_net *nn)
{
	__be32 status;
	bool return_revoked = false;

	/*
	 *  only return revoked delegations if explicitly asked.
	 *  otherwise we report revoked or bad_stateid status.
	 */
	if (typemask & NFS4_REVOKED_DELEG_STID)
		return_revoked = true;
	else if (typemask & NFS4_DELEG_STID)
		typemask |= NFS4_REVOKED_DELEG_STID;

	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
		CLOSE_STATEID(stateid))
		return nfserr_bad_stateid;
	status = lookup_clientid(&stateid->si_opaque.so_clid, cstate, nn);
	if (status == nfserr_stale_clientid) {
		if (cstate->session)
			return nfserr_bad_stateid;
		return nfserr_stale_stateid;
	}
	if (status)
		return status;
	*s = find_stateid_by_type(cstate->clp, stateid, typemask);
	if (!*s)
		return nfserr_bad_stateid;
	if (((*s)->sc_type == NFS4_REVOKED_DELEG_STID) && !return_revoked) {
		nfs4_put_stid(*s);
		if (cstate->minorversion)
			return nfserr_deleg_revoked;
		return nfserr_bad_stateid;
	}
	return nfs_ok;
}
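
/*
 * Typical call (illustrative sketch): a caller interested only in
 * delegations passes just NFS4_DELEG_STID; the typemask is silently
 * widened to include NFS4_REVOKED_DELEG_STID so that a revoked
 * delegation surfaces as nfserr_deleg_revoked (4.1+) or
 * nfserr_bad_stateid (4.0) instead of looking like it never existed:
 *
 *	status = nfsd4_lookup_stateid(cstate, stateid,
 *				      NFS4_DELEG_STID, &s, nn);
 */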
static struct file *
nfs4_find_file(struct nfs4_stid *s, int flags)
{
	if (!s)
		return NULL;

	switch (s->sc_type) {
	case NFS4_DELEG_STID:
		if (WARN_ON_ONCE(!s->sc_file->fi_deleg_file))
			return NULL;
		return get_file(s->sc_file->fi_deleg_file);
	case NFS4_OPEN_STID:
	case NFS4_LOCK_STID:
		if (flags & RD_STATE)
			return find_readable_file(s->sc_file);
		else
			return find_writeable_file(s->sc_file);
		break;
	}

	return NULL;
}
static __be32
nfs4_check_olstateid(struct svc_fh *fhp, struct nfs4_ol_stateid *ols, int flags)
{
	__be32 status;

	status = nfsd4_check_openowner_confirmed(ols);
	if (status)
		return status;
	return nfs4_check_openmode(ols, flags);
}
static __be32
nfs4_check_file(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfs4_stid *s,
		struct file **filpp, bool *tmp_file, int flags)
{
	int acc = (flags & RD_STATE) ? NFSD_MAY_READ : NFSD_MAY_WRITE;
	struct file *file;
	__be32 status;

	file = nfs4_find_file(s, flags);
	if (file) {
		status = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry,
				acc | NFSD_MAY_OWNER_OVERRIDE);
		if (status) {
			fput(file);
			return status;
		}

		*filpp = file;
	} else {
		status = nfsd_open(rqstp, fhp, S_IFREG, acc, filpp);
		if (status)
			return status;

		if (tmp_file)
			*tmp_file = true;
	}

	return 0;
}
/*
 * Checks for stateid operations
 */
__be32
nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate, stateid_t *stateid,
		int flags, struct file **filpp, bool *tmp_file)
{
	struct svc_fh *fhp = &cstate->current_fh;
	struct inode *ino = d_inode(fhp->fh_dentry);
	struct net *net = SVC_NET(rqstp);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	struct nfs4_stid *s = NULL;
	__be32 status;

	if (filpp)
		*filpp = NULL;
	if (tmp_file)
		*tmp_file = false;

	if (grace_disallows_io(net, ino))
		return nfserr_grace;

	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) {
		status = check_special_stateids(net, fhp, stateid, flags);
		goto done;
	}

	status = nfsd4_lookup_stateid(cstate, stateid,
				NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID,
				&s, nn);
	if (status)
		return status;
	status = check_stateid_generation(stateid, &s->sc_stateid,
			nfsd4_has_session(cstate));
	if (status)
		goto out;

	switch (s->sc_type) {
	case NFS4_DELEG_STID:
		status = nfs4_check_delegmode(delegstateid(s), flags);
		break;
	case NFS4_OPEN_STID:
	case NFS4_LOCK_STID:
		status = nfs4_check_olstateid(fhp, openlockstateid(s), flags);
		break;
	default:
		status = nfserr_bad_stateid;
		break;
	}
	if (status)
		goto out;
	status = nfs4_check_fh(fhp, s);

done:
	if (!status && filpp)
		status = nfs4_check_file(rqstp, fhp, s, filpp, tmp_file, flags);
out:
	if (s)
		nfs4_put_stid(s);
	return status;
}
/*
 * Test if the stateid is valid
 */
__be32
nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		   struct nfsd4_test_stateid *test_stateid)
{
	struct nfsd4_test_stateid_id *stateid;
	struct nfs4_client *cl = cstate->session->se_client;

	list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list)
		stateid->ts_id_status =
			nfsd4_validate_stateid(cl, &stateid->ts_id_stateid);

	return nfs_ok;
}
static __be32
nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s)
{
	struct nfs4_ol_stateid *stp = openlockstateid(s);
	__be32 ret;

	mutex_lock(&stp->st_mutex);

	ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
	if (ret)
		goto out;

	ret = nfserr_locks_held;
	if (check_for_locks(stp->st_stid.sc_file,
			    lockowner(stp->st_stateowner)))
		goto out;

	release_lock_stateid(stp);
	ret = nfs_ok;

out:
	mutex_unlock(&stp->st_mutex);
	nfs4_put_stid(s);
	return ret;
}
__be32
nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		   struct nfsd4_free_stateid *free_stateid)
{
	stateid_t *stateid = &free_stateid->fr_stateid;
	struct nfs4_stid *s;
	struct nfs4_delegation *dp;
	struct nfs4_client *cl = cstate->session->se_client;
	__be32 ret = nfserr_bad_stateid;

	spin_lock(&cl->cl_lock);
	s = find_stateid_locked(cl, stateid);
	if (!s)
		goto out_unlock;
	switch (s->sc_type) {
	case NFS4_DELEG_STID:
		ret = nfserr_locks_held;
		break;
	case NFS4_OPEN_STID:
		ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
		if (ret)
			break;
		ret = nfserr_locks_held;
		break;
	case NFS4_LOCK_STID:
		atomic_inc(&s->sc_count);
		spin_unlock(&cl->cl_lock);
		ret = nfsd4_free_lock_stateid(stateid, s);
		goto out;
	case NFS4_REVOKED_DELEG_STID:
		dp = delegstateid(s);
		list_del_init(&dp->dl_recall_lru);
		spin_unlock(&cl->cl_lock);
		nfs4_put_stid(s);
		ret = nfs_ok;
		goto out;
	/* Default falls through and returns nfserr_bad_stateid */
	}
out_unlock:
	spin_unlock(&cl->cl_lock);
out:
	return ret;
}
static inline int
setlkflg(int type)
{
	return (type == NFS4_READW_LT || type == NFS4_READ_LT) ?
		RD_STATE : WR_STATE;
}
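
/*
 * Example: a LOCK request of type NFS4_READW_LT maps to RD_STATE, so
 * nfs4_check_openmode() will verify that the parent open stateid
 * permits reads; NFS4_WRITE_LT and NFS4_WRITEW_LT map to WR_STATE and
 * require write access.
 */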
static __be32
nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp)
{
	struct svc_fh *current_fh = &cstate->current_fh;
	struct nfs4_stateowner *sop = stp->st_stateowner;
	__be32 status;

	status = nfsd4_check_seqid(cstate, sop, seqid);
	if (status)
		return status;
	status = nfsd4_lock_ol_stateid(stp);
	if (status != nfs_ok)
		return status;
	status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
	if (status == nfs_ok)
		status = nfs4_check_fh(current_fh, &stp->st_stid);
	if (status != nfs_ok)
		mutex_unlock(&stp->st_mutex);
	return status;
}
/*
 * Checks for sequence id mutating operations.
 */
static __be32
nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
			 stateid_t *stateid, char typemask,
			 struct nfs4_ol_stateid **stpp,
			 struct nfsd_net *nn)
{
	__be32 status;
	struct nfs4_stid *s;
	struct nfs4_ol_stateid *stp = NULL;

	dprintk("NFSD: %s: seqid=%d stateid = " STATEID_FMT "\n", __func__,
		seqid, STATEID_VAL(stateid));

	*stpp = NULL;
	status = nfsd4_lookup_stateid(cstate, stateid, typemask, &s, nn);
	if (status)
		return status;
	stp = openlockstateid(s);
	nfsd4_cstate_assign_replay(cstate, stp->st_stateowner);

	status = nfs4_seqid_op_checks(cstate, stateid, seqid, stp);
	if (!status)
		*stpp = stp;
	else
		nfs4_put_stid(&stp->st_stid);
	return status;
}
static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
		stateid_t *stateid, struct nfs4_ol_stateid **stpp, struct nfsd_net *nn)
{
	__be32 status;
	struct nfs4_openowner *oo;
	struct nfs4_ol_stateid *stp;

	status = nfs4_preprocess_seqid_op(cstate, seqid, stateid,
						NFS4_OPEN_STID, &stp, nn);
	if (status)
		return status;
	oo = openowner(stp->st_stateowner);
	if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
		mutex_unlock(&stp->st_mutex);
		nfs4_put_stid(&stp->st_stid);
		return nfserr_bad_stateid;
	}
	*stpp = stp;
	return nfs_ok;
}
__be32
nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		   struct nfsd4_open_confirm *oc)
{
	__be32 status;
	struct nfs4_openowner *oo;
	struct nfs4_ol_stateid *stp;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	dprintk("NFSD: nfsd4_open_confirm on file %pd\n",
			cstate->current_fh.fh_dentry);

	status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
	if (status)
		return status;

	status = nfs4_preprocess_seqid_op(cstate,
					oc->oc_seqid, &oc->oc_req_stateid,
					NFS4_OPEN_STID, &stp, nn);
	if (status)
		goto out;
	oo = openowner(stp->st_stateowner);
	status = nfserr_bad_stateid;
	if (oo->oo_flags & NFS4_OO_CONFIRMED) {
		mutex_unlock(&stp->st_mutex);
		goto put_stateid;
	}
	oo->oo_flags |= NFS4_OO_CONFIRMED;
	nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid);
	mutex_unlock(&stp->st_mutex);
	dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n",
		__func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid));

	nfsd4_client_record_create(oo->oo_owner.so_client);
	status = 0;
put_stateid:
	nfs4_put_stid(&stp->st_stid);
out:
	nfsd4_bump_seqid(cstate, status);
	return status;
}
static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access)
{
	if (!test_access(access, stp))
		return;
	nfs4_file_put_access(stp->st_stid.sc_file, access);
	clear_access(access, stp);
}

static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access)
{
	switch (to_access) {
	case NFS4_SHARE_ACCESS_READ:
		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_WRITE);
		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
		break;
	case NFS4_SHARE_ACCESS_WRITE:
		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_READ);
		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
		break;
	case NFS4_SHARE_ACCESS_BOTH:
		break;
	default:
		WARN_ON_ONCE(1);
	}
}
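
/*
 * Example: an open stateid currently holding NFS4_SHARE_ACCESS_BOTH
 * that is downgraded to NFS4_SHARE_ACCESS_READ drops the WRITE and
 * BOTH access bits (and their references on the underlying nfs4_file)
 * while leaving READ untouched; a "downgrade" to BOTH is a no-op.
 */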
__be32
nfsd4_open_downgrade(struct svc_rqst *rqstp,
		     struct nfsd4_compound_state *cstate,
		     struct nfsd4_open_downgrade *od)
{
	__be32 status;
	struct nfs4_ol_stateid *stp;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	dprintk("NFSD: nfsd4_open_downgrade on file %pd\n",
			cstate->current_fh.fh_dentry);

	/* We don't yet support WANT bits: */
	if (od->od_deleg_want)
		dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__,
			od->od_deleg_want);

	status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid,
					&od->od_stateid, &stp, nn);
	if (status)
		goto out;
	status = nfserr_inval;
	if (!test_access(od->od_share_access, stp)) {
		dprintk("NFSD: access not a subset of current bitmap: 0x%hhx, input access=%08x\n",
			stp->st_access_bmap, od->od_share_access);
		goto put_stateid;
	}
	if (!test_deny(od->od_share_deny, stp)) {
		dprintk("NFSD: deny not a subset of current bitmap: 0x%hhx, input deny=%08x\n",
			stp->st_deny_bmap, od->od_share_deny);
		goto put_stateid;
	}
	nfs4_stateid_downgrade(stp, od->od_share_access);
	reset_union_bmap_deny(od->od_share_deny, stp);
	nfs4_inc_and_copy_stateid(&od->od_stateid, &stp->st_stid);
	status = nfs_ok;
put_stateid:
	mutex_unlock(&stp->st_mutex);
	nfs4_put_stid(&stp->st_stid);
out:
	nfsd4_bump_seqid(cstate, status);
	return status;
}
static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
{
	struct nfs4_client *clp = s->st_stid.sc_client;
	bool unhashed;
	LIST_HEAD(reaplist);

	spin_lock(&clp->cl_lock);
	unhashed = unhash_open_stateid(s, &reaplist);

	if (clp->cl_minorversion) {
		if (unhashed)
			put_ol_stateid_locked(s, &reaplist);
		spin_unlock(&clp->cl_lock);
		free_ol_stateid_reaplist(&reaplist);
	} else {
		spin_unlock(&clp->cl_lock);
		free_ol_stateid_reaplist(&reaplist);
		if (unhashed)
			move_to_close_lru(s, clp->net);
	}
}
/*
 * nfs4_unlock_state() called after encode
 */
__be32
nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	    struct nfsd4_close *close)
{
	__be32 status;
	struct nfs4_ol_stateid *stp;
	struct net *net = SVC_NET(rqstp);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	dprintk("NFSD: nfsd4_close on file %pd\n",
			cstate->current_fh.fh_dentry);

	status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid,
					&close->cl_stateid,
					NFS4_OPEN_STID|NFS4_CLOSED_STID,
					&stp, nn);
	nfsd4_bump_seqid(cstate, status);
	if (status)
		goto out;

	stp->st_stid.sc_type = NFS4_CLOSED_STID;
	nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);

	nfsd4_close_open_stateid(stp);
	mutex_unlock(&stp->st_mutex);

	/* See RFC5661 section 18.2.4 */
	if (stp->st_stid.sc_client->cl_minorversion)
		memcpy(&close->cl_stateid, &close_stateid,
				sizeof(close->cl_stateid));

	/* put reference from nfs4_preprocess_seqid_op */
	nfs4_put_stid(&stp->st_stid);
out:
	return status;
}
__be32
nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		  struct nfsd4_delegreturn *dr)
{
	struct nfs4_delegation *dp;
	stateid_t *stateid = &dr->dr_stateid;
	struct nfs4_stid *s;
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
		return status;

	status = nfsd4_lookup_stateid(cstate, stateid, NFS4_DELEG_STID, &s, nn);
	if (status)
		goto out;
	dp = delegstateid(s);
	status = check_stateid_generation(stateid, &dp->dl_stid.sc_stateid, nfsd4_has_session(cstate));
	if (status)
		goto put_stateid;

	destroy_delegation(dp);
put_stateid:
	nfs4_put_stid(&dp->dl_stid);
out:
	return status;
}
static inline u64
end_offset(u64 start, u64 len)
{
	u64 end;

	end = start + len;
	return end >= start ? end : NFS4_MAX_UINT64;
}

/* last octet in a range */
static inline u64
last_byte_offset(u64 start, u64 len)
{
	u64 end;

	WARN_ON_ONCE(!len);
	end = start + len;
	return end > start ? end - 1 : NFS4_MAX_UINT64;
}
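
/*
 * Worked examples (hypothetical values): end_offset(100, 10) == 110 and
 * last_byte_offset(100, 10) == 109, the last byte actually covered by
 * the range.  If start + len wraps past 2^64, as in
 * last_byte_offset(0xfffffffffffffff8, 0x10), the sum is <= start and
 * NFS4_MAX_UINT64 is returned, meaning "through end of file".
 */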
/*
 * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that
 * we can't properly handle lock requests that go beyond the (2^63 - 1)-th
 * byte, because of sign extension problems.  Since NFSv4 calls for 64-bit
 * locking, this prevents us from being completely protocol-compliant.  The
 * real solution to this problem is to start using unsigned file offsets in
 * the VFS, but this is a very deep change!
 */
static inline void
nfs4_transform_lock_offset(struct file_lock *lock)
{
	if (lock->fl_start < 0)
		lock->fl_start = OFFSET_MAX;
	if (lock->fl_end < 0)
		lock->fl_end = OFFSET_MAX;
}
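
/*
 * Example: a client locking the byte range starting at 2^63 produces
 * fl_start/fl_end values that are negative once stored in the kernel's
 * signed loff_t; both are clamped to OFFSET_MAX so the lock degrades to
 * "through end of file" rather than describing a corrupt range.
 */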
static fl_owner_t
nfsd4_fl_get_owner(fl_owner_t owner)
{
	struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;

	nfs4_get_stateowner(&lo->lo_owner);
	return owner;
}

static void
nfsd4_fl_put_owner(fl_owner_t owner)
{
	struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;

	if (lo)
		nfs4_put_stateowner(&lo->lo_owner);
}

static const struct lock_manager_operations nfsd_posix_mng_ops = {
	.lm_get_owner = nfsd4_fl_get_owner,
	.lm_put_owner = nfsd4_fl_put_owner,
};
static void
nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny)
{
	struct nfs4_lockowner *lo;

	if (fl->fl_lmops == &nfsd_posix_mng_ops) {
		lo = (struct nfs4_lockowner *) fl->fl_owner;
		deny->ld_owner.data = kmemdup(lo->lo_owner.so_owner.data,
					lo->lo_owner.so_owner.len, GFP_KERNEL);
		if (!deny->ld_owner.data)
			/* We just don't care that much */
			goto nevermind;
		deny->ld_owner.len = lo->lo_owner.so_owner.len;
		deny->ld_clientid = lo->lo_owner.so_client->cl_clientid;
	} else {
nevermind:
		deny->ld_owner.len = 0;
		deny->ld_owner.data = NULL;
		deny->ld_clientid.cl_boot = 0;
		deny->ld_clientid.cl_id = 0;
	}
	deny->ld_start = fl->fl_start;
	deny->ld_length = NFS4_MAX_UINT64;
	if (fl->fl_end != NFS4_MAX_UINT64)
		deny->ld_length = fl->fl_end - fl->fl_start + 1;
	deny->ld_type = NFS4_READ_LT;
	if (fl->fl_type != F_RDLCK)
		deny->ld_type = NFS4_WRITE_LT;
}
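
/*
 * Example (hypothetical values): a conflicting POSIX lock covering
 * bytes 100..199 (fl_start == 100, fl_end == 199) is reported back to
 * the client as ld_start == 100, ld_length == 100.
 */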
static struct nfs4_lockowner *
find_lockowner_str_locked(struct nfs4_client *clp, struct xdr_netobj *owner)
{
	unsigned int strhashval = ownerstr_hashval(owner);
	struct nfs4_stateowner *so;

	lockdep_assert_held(&clp->cl_lock);

	list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[strhashval],
			    so_strhash) {
		if (so->so_is_open_owner)
			continue;
		if (same_owner_str(so, owner))
			return lockowner(nfs4_get_stateowner(so));
	}
	return NULL;
}

static struct nfs4_lockowner *
find_lockowner_str(struct nfs4_client *clp, struct xdr_netobj *owner)
{
	struct nfs4_lockowner *lo;

	spin_lock(&clp->cl_lock);
	lo = find_lockowner_str_locked(clp, owner);
	spin_unlock(&clp->cl_lock);
	return lo;
}
static void nfs4_unhash_lockowner(struct nfs4_stateowner *sop)
{
	unhash_lockowner_locked(lockowner(sop));
}

static void nfs4_free_lockowner(struct nfs4_stateowner *sop)
{
	struct nfs4_lockowner *lo = lockowner(sop);

	kmem_cache_free(lockowner_slab, lo);
}

static const struct nfs4_stateowner_operations lockowner_ops = {
	.so_unhash =	nfs4_unhash_lockowner,
	.so_free =	nfs4_free_lockowner,
};
/*
 * Alloc a lock owner structure.
 * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) has
 * occurred.
 *
 * strhashval = ownerstr_hashval
 */
static struct nfs4_lockowner *
alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp,
			   struct nfs4_ol_stateid *open_stp,
			   struct nfsd4_lock *lock)
{
	struct nfs4_lockowner *lo, *ret;

	lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp);
	if (!lo)
		return NULL;
	INIT_LIST_HEAD(&lo->lo_owner.so_stateids);
	lo->lo_owner.so_is_open_owner = 0;
	lo->lo_owner.so_seqid = lock->lk_new_lock_seqid;
	lo->lo_owner.so_ops = &lockowner_ops;
	spin_lock(&clp->cl_lock);
	ret = find_lockowner_str_locked(clp, &lock->lk_new_owner);
	if (ret == NULL) {
		list_add(&lo->lo_owner.so_strhash,
			 &clp->cl_ownerstr_hashtbl[strhashval]);
		ret = lo;
	} else
		nfs4_free_stateowner(&lo->lo_owner);

	spin_unlock(&clp->cl_lock);
	return ret;
}
static void
init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
		  struct nfs4_file *fp, struct inode *inode,
		  struct nfs4_ol_stateid *open_stp)
{
	struct nfs4_client *clp = lo->lo_owner.so_client;

	lockdep_assert_held(&clp->cl_lock);

	atomic_inc(&stp->st_stid.sc_count);
	stp->st_stid.sc_type = NFS4_LOCK_STID;
	stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner);
	get_nfs4_file(fp);
	stp->st_stid.sc_file = fp;
	stp->st_access_bmap = 0;
	stp->st_deny_bmap = open_stp->st_deny_bmap;
	stp->st_openstp = open_stp;
	mutex_init(&stp->st_mutex);
	list_add(&stp->st_locks, &open_stp->st_locks);
	list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
	spin_lock(&fp->fi_lock);
	list_add(&stp->st_perfile, &fp->fi_stateids);
	spin_unlock(&fp->fi_lock);
}
static struct nfs4_ol_stateid *
find_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp)
{
	struct nfs4_ol_stateid *lst;
	struct nfs4_client *clp = lo->lo_owner.so_client;

	lockdep_assert_held(&clp->cl_lock);

	list_for_each_entry(lst, &lo->lo_owner.so_stateids, st_perstateowner) {
		if (lst->st_stid.sc_file == fp) {
			atomic_inc(&lst->st_stid.sc_count);
			return lst;
		}
	}
	return NULL;
}
static struct nfs4_ol_stateid *
find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
			    struct inode *inode, struct nfs4_ol_stateid *ost,
			    bool *new)
{
	struct nfs4_stid *ns = NULL;
	struct nfs4_ol_stateid *lst;
	struct nfs4_openowner *oo = openowner(ost->st_stateowner);
	struct nfs4_client *clp = oo->oo_owner.so_client;

	spin_lock(&clp->cl_lock);
	lst = find_lock_stateid(lo, fi);
	if (lst == NULL) {
		spin_unlock(&clp->cl_lock);
		ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid);
		if (ns == NULL)
			return NULL;

		spin_lock(&clp->cl_lock);
		lst = find_lock_stateid(lo, fi);
		if (likely(!lst)) {
			lst = openlockstateid(ns);
			init_lock_stateid(lst, lo, fi, inode, ost);
			ns = NULL;
			*new = true;
		}
	}
	spin_unlock(&clp->cl_lock);
	if (ns)
		nfs4_put_stid(ns);
	return lst;
}
static int
check_lock_length(u64 offset, u64 length)
{
	return ((length == 0) || ((length != NFS4_MAX_UINT64) &&
		(length > ~offset)));
}
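
/*
 * Examples: a zero-length lock is always invalid.  With
 * offset == 0xfffffffffffffff8, ~offset == 7, so any length > 7 (other
 * than the special NFS4_MAX_UINT64 "through end of file" length) would
 * run past the last representable byte and is rejected.
 */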
static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access)
{
	struct nfs4_file *fp = lock_stp->st_stid.sc_file;

	lockdep_assert_held(&fp->fi_lock);

	if (test_access(access, lock_stp))
		return;
	__nfs4_file_get_access(fp, access);
	set_access(access, lock_stp);
}
static __be32
lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
			    struct nfs4_ol_stateid *ost,
			    struct nfsd4_lock *lock,
			    struct nfs4_ol_stateid **plst, bool *new)
{
	__be32 status;
	struct nfs4_file *fi = ost->st_stid.sc_file;
	struct nfs4_openowner *oo = openowner(ost->st_stateowner);
	struct nfs4_client *cl = oo->oo_owner.so_client;
	struct inode *inode = d_inode(cstate->current_fh.fh_dentry);
	struct nfs4_lockowner *lo;
	struct nfs4_ol_stateid *lst;
	unsigned int strhashval;
	bool hashed;

	lo = find_lockowner_str(cl, &lock->lk_new_owner);
	if (!lo) {
		strhashval = ownerstr_hashval(&lock->lk_new_owner);
		lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock);
		if (lo == NULL)
			return nfserr_jukebox;
	} else {
		/* with an existing lockowner, seqids must be the same */
		status = nfserr_bad_seqid;
		if (!cstate->minorversion &&
		    lock->lk_new_lock_seqid != lo->lo_owner.so_seqid)
			goto out;
	}

retry:
	lst = find_or_create_lock_stateid(lo, fi, inode, ost, new);
	if (lst == NULL) {
		status = nfserr_jukebox;
		goto out;
	}

	mutex_lock(&lst->st_mutex);

	/* See if it's still hashed to avoid race with FREE_STATEID */
	spin_lock(&cl->cl_lock);
	hashed = !list_empty(&lst->st_perfile);
	spin_unlock(&cl->cl_lock);

	if (!hashed) {
		mutex_unlock(&lst->st_mutex);
		nfs4_put_stid(&lst->st_stid);
		goto retry;
	}
	status = nfs_ok;
	*plst = lst;
out:
	nfs4_put_stateowner(&lo->lo_owner);
	return status;
}
/*
 *  LOCK operation
 */
__be32
nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	   struct nfsd4_lock *lock)
{
	struct nfs4_openowner *open_sop = NULL;
	struct nfs4_lockowner *lock_sop = NULL;
	struct nfs4_ol_stateid *lock_stp = NULL;
	struct nfs4_ol_stateid *open_stp = NULL;
	struct nfs4_file *fp;
	struct file *filp = NULL;
	struct file_lock *file_lock = NULL;
	struct file_lock *conflock = NULL;
	__be32 status = 0;
	int lkflg;
	int err;
	bool new = false;
	struct net *net = SVC_NET(rqstp);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
		(long long) lock->lk_offset,
		(long long) lock->lk_length);

	if (check_lock_length(lock->lk_offset, lock->lk_length))
		 return nfserr_inval;

	if ((status = fh_verify(rqstp, &cstate->current_fh,
				S_IFREG, NFSD_MAY_LOCK))) {
		dprintk("NFSD: nfsd4_lock: permission denied!\n");
		return status;
	}

	if (lock->lk_is_new) {
		if (nfsd4_has_session(cstate))
			/* See rfc 5661 18.10.3: given clientid is ignored: */
			memcpy(&lock->lk_new_clientid,
				&cstate->session->se_client->cl_clientid,
				sizeof(clientid_t));

		status = nfserr_stale_clientid;
		if (STALE_CLIENTID(&lock->lk_new_clientid, nn))
			goto out;

		/* validate and update open stateid and open seqid */
		status = nfs4_preprocess_confirmed_seqid_op(cstate,
				        lock->lk_new_open_seqid,
		                        &lock->lk_new_open_stateid,
					&open_stp, nn);
		if (status)
			goto out;
		mutex_unlock(&open_stp->st_mutex);
		open_sop = openowner(open_stp->st_stateowner);
		status = nfserr_bad_stateid;
		if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
						&lock->lk_new_clientid))
			goto out;
		status = lookup_or_create_lock_state(cstate, open_stp, lock,
							&lock_stp, &new);
	} else {
		status = nfs4_preprocess_seqid_op(cstate,
				       lock->lk_old_lock_seqid,
				       &lock->lk_old_lock_stateid,
				       NFS4_LOCK_STID, &lock_stp, nn);
	}
	if (status)
		goto out;
	lock_sop = lockowner(lock_stp->st_stateowner);

	lkflg = setlkflg(lock->lk_type);
	status = nfs4_check_openmode(lock_stp, lkflg);
	if (status)
		goto out;

	status = nfserr_grace;
	if (locks_in_grace(net) && !lock->lk_reclaim)
		goto out;
	status = nfserr_no_grace;
	if (!locks_in_grace(net) && lock->lk_reclaim)
		goto out;

	file_lock = locks_alloc_lock();
	if (!file_lock) {
		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
		status = nfserr_jukebox;
		goto out;
	}

	fp = lock_stp->st_stid.sc_file;
	switch (lock->lk_type) {
		case NFS4_READ_LT:
		case NFS4_READW_LT:
			spin_lock(&fp->fi_lock);
			filp = find_readable_file_locked(fp);
			if (filp)
				get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ);
			spin_unlock(&fp->fi_lock);
			file_lock->fl_type = F_RDLCK;
			break;
		case NFS4_WRITE_LT:
		case NFS4_WRITEW_LT:
			spin_lock(&fp->fi_lock);
			filp = find_writeable_file_locked(fp);
			if (filp)
				get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE);
			spin_unlock(&fp->fi_lock);
			file_lock->fl_type = F_WRLCK;
			break;
		default:
			status = nfserr_inval;
		goto out;
	}
	if (!filp) {
		status = nfserr_openmode;
		goto out;
	}

	file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(&lock_sop->lo_owner));
	file_lock->fl_pid = current->tgid;
	file_lock->fl_file = filp;
	file_lock->fl_flags = FL_POSIX;
	file_lock->fl_lmops = &nfsd_posix_mng_ops;
	file_lock->fl_start = lock->lk_offset;
	file_lock->fl_end = last_byte_offset(lock->lk_offset, lock->lk_length);
	nfs4_transform_lock_offset(file_lock);

	conflock = locks_alloc_lock();
	if (!conflock) {
		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
		status = nfserr_jukebox;
		goto out;
	}

	err = vfs_lock_file(filp, F_SETLK, file_lock, conflock);
	switch (-err) {
	case 0: /* success! */
		nfs4_inc_and_copy_stateid(&lock->lk_resp_stateid, &lock_stp->st_stid);
		status = 0;
		break;
	case (EAGAIN):		/* conflock holds conflicting lock */
		status = nfserr_denied;
		dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
		nfs4_set_lock_denied(conflock, &lock->lk_denied);
		break;
	case (EDEADLK):
		status = nfserr_deadlock;
		break;
	default:
		dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n",err);
		status = nfserrno(err);
		break;
	}
out:
	if (filp)
		fput(filp);
	if (lock_stp) {
		/* Bump seqid manually if the 4.0 replay owner is openowner */
		if (cstate->replay_owner &&
		    cstate->replay_owner != &lock_sop->lo_owner &&
		    seqid_mutating_err(ntohl(status)))
			lock_sop->lo_owner.so_seqid++;

		mutex_unlock(&lock_stp->st_mutex);

		/*
		 * If this is a new, never-before-used stateid, and we are
		 * returning an error, then just go ahead and release it.
		 */
		if (status && new)
			release_lock_stateid(lock_stp);

		nfs4_put_stid(&lock_stp->st_stid);
	}
	if (open_stp)
		nfs4_put_stid(&open_stp->st_stid);
	nfsd4_bump_seqid(cstate, status);
	if (file_lock)
		locks_free_lock(file_lock);
	if (conflock)
		locks_free_lock(conflock);
	return status;
}
/*
 * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
 * so we do a temporary open here just to get an open file to pass to
 * vfs_test_lock.  (Arguably perhaps test_lock should be done with an
 * inode operation.)
 */
static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
{
	struct file *file;
	__be32 err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_READ, &file);
	if (!err) {
		err = nfserrno(vfs_test_lock(file, lock));
		fput(file);
	}
	return err;
}
/*
 * LOCKT operation
 */
__be32
nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	    struct nfsd4_lockt *lockt)
{
	struct file_lock *file_lock = NULL;
	struct nfs4_lockowner *lo = NULL;
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	if (locks_in_grace(SVC_NET(rqstp)))
		return nfserr_grace;

	if (check_lock_length(lockt->lt_offset, lockt->lt_length))
		 return nfserr_inval;

	if (!nfsd4_has_session(cstate)) {
		status = lookup_clientid(&lockt->lt_clientid, cstate, nn);
		if (status)
			goto out;
	}

	if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
		goto out;

	file_lock = locks_alloc_lock();
	if (!file_lock) {
		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
		status = nfserr_jukebox;
		goto out;
	}

	switch (lockt->lt_type) {
		case NFS4_READ_LT:
		case NFS4_READW_LT:
			file_lock->fl_type = F_RDLCK;
		break;
		case NFS4_WRITE_LT:
		case NFS4_WRITEW_LT:
			file_lock->fl_type = F_WRLCK;
		break;
		default:
			dprintk("NFSD: nfs4_lockt: bad lock type!\n");
			status = nfserr_inval;
		goto out;
	}

	lo = find_lockowner_str(cstate->clp, &lockt->lt_owner);
	if (lo)
		file_lock->fl_owner = (fl_owner_t)lo;
	file_lock->fl_pid = current->tgid;
	file_lock->fl_flags = FL_POSIX;

	file_lock->fl_start = lockt->lt_offset;
	file_lock->fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length);

	nfs4_transform_lock_offset(file_lock);

	status = nfsd_test_lock(rqstp, &cstate->current_fh, file_lock);
	if (status)
		goto out;

	if (file_lock->fl_type != F_UNLCK) {
		status = nfserr_denied;
		nfs4_set_lock_denied(file_lock, &lockt->lt_denied);
	}
out:
	if (lo)
		nfs4_put_stateowner(&lo->lo_owner);
	if (file_lock)
		locks_free_lock(file_lock);
	return status;
}
__be32
nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	    struct nfsd4_locku *locku)
{
	struct nfs4_ol_stateid *stp;
	struct file *filp = NULL;
	struct file_lock *file_lock = NULL;
	__be32 status;
	int err;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	dprintk("NFSD: nfsd4_locku: start=%Ld length=%Ld\n",
		(long long) locku->lu_offset,
		(long long) locku->lu_length);

	if (check_lock_length(locku->lu_offset, locku->lu_length))
		 return nfserr_inval;

	status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid,
					&locku->lu_stateid, NFS4_LOCK_STID,
					&stp, nn);
	if (status)
		goto out;
	filp = find_any_file(stp->st_stid.sc_file);
	if (!filp) {
		status = nfserr_lock_range;
		goto put_stateid;
	}
	file_lock = locks_alloc_lock();
	if (!file_lock) {
		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
		status = nfserr_jukebox;
		goto fput;
	}

	file_lock->fl_type = F_UNLCK;
	file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(stp->st_stateowner));
	file_lock->fl_pid = current->tgid;
	file_lock->fl_file = filp;
	file_lock->fl_flags = FL_POSIX;
	file_lock->fl_lmops = &nfsd_posix_mng_ops;
	file_lock->fl_start = locku->lu_offset;

	file_lock->fl_end = last_byte_offset(locku->lu_offset,
						locku->lu_length);
	nfs4_transform_lock_offset(file_lock);

	err = vfs_lock_file(filp, F_SETLK, file_lock, NULL);
	if (err) {
		dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n");
		goto out_nfserr;
	}
	nfs4_inc_and_copy_stateid(&locku->lu_stateid, &stp->st_stid);
fput:
	fput(filp);
put_stateid:
	mutex_unlock(&stp->st_mutex);
	nfs4_put_stid(&stp->st_stid);
out:
	nfsd4_bump_seqid(cstate, status);
	if (file_lock)
		locks_free_lock(file_lock);
	return status;

out_nfserr:
	status = nfserrno(err);
	goto fput;
}
/*
 * returns
 * 	true:  locks held by lockowner
 * 	false: no locks held by lockowner
 */
static bool
check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
{
	struct file_lock *fl;
	int status = false;
	struct file *filp = find_any_file(fp);
	struct inode *inode;
	struct file_lock_context *flctx;

	if (!filp) {
		/* Any valid lock stateid should have some sort of access */
		WARN_ON_ONCE(1);
		return status;
	}

	inode = file_inode(filp);
	flctx = inode->i_flctx;

	if (flctx && !list_empty_careful(&flctx->flc_posix)) {
		spin_lock(&flctx->flc_lock);
		list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
			if (fl->fl_owner == (fl_owner_t)lowner) {
				status = true;
				break;
			}
		}
		spin_unlock(&flctx->flc_lock);
	}
	fput(filp);
	return status;
}
__be32
nfsd4_release_lockowner(struct svc_rqst *rqstp,
			struct nfsd4_compound_state *cstate,
			struct nfsd4_release_lockowner *rlockowner)
{
	clientid_t *clid = &rlockowner->rl_clientid;
	struct nfs4_stateowner *sop;
	struct nfs4_lockowner *lo = NULL;
	struct nfs4_ol_stateid *stp;
	struct xdr_netobj *owner = &rlockowner->rl_owner;
	unsigned int hashval = ownerstr_hashval(owner);
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
	struct nfs4_client *clp;
	LIST_HEAD (reaplist);

	dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
		clid->cl_boot, clid->cl_id);

	status = lookup_clientid(clid, cstate, nn);
	if (status)
		return status;

	clp = cstate->clp;
	/* Find the matching lock stateowner */
	spin_lock(&clp->cl_lock);
	list_for_each_entry(sop, &clp->cl_ownerstr_hashtbl[hashval],
			    so_strhash) {

		if (sop->so_is_open_owner || !same_owner_str(sop, owner))
			continue;

		/* see if there are still any locks associated with it */
		lo = lockowner(sop);
		list_for_each_entry(stp, &sop->so_stateids, st_perstateowner) {
			if (check_for_locks(stp->st_stid.sc_file, lo)) {
				status = nfserr_locks_held;
				spin_unlock(&clp->cl_lock);
				return status;
			}
		}

		nfs4_get_stateowner(sop);
		break;
	}
	if (!lo) {
		spin_unlock(&clp->cl_lock);
		return status;
	}

	unhash_lockowner_locked(lo);
	while (!list_empty(&lo->lo_owner.so_stateids)) {
		stp = list_first_entry(&lo->lo_owner.so_stateids,
				       struct nfs4_ol_stateid,
				       st_perstateowner);
		WARN_ON(!unhash_lock_stateid(stp));
		put_ol_stateid_locked(stp, &reaplist);
	}
	spin_unlock(&clp->cl_lock);
	free_ol_stateid_reaplist(&reaplist);
	nfs4_put_stateowner(&lo->lo_owner);

	return status;
}
static inline struct nfs4_client_reclaim *
alloc_reclaim(void)
{
	return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL);
}

bool
nfs4_has_reclaimed_state(const char *name, struct nfsd_net *nn)
{
	struct nfs4_client_reclaim *crp;

	crp = nfsd4_find_reclaim_client(name, nn);
	return (crp && crp->cr_clp);
}
/*
 * failure => all reset bets are off, nfserr_no_grace...
 */
struct nfs4_client_reclaim *
nfs4_client_to_reclaim(const char *name, struct nfsd_net *nn)
{
	unsigned int strhashval;
	struct nfs4_client_reclaim *crp;

	dprintk("NFSD nfs4_client_to_reclaim NAME: %.*s\n", HEXDIR_LEN, name);
	crp = alloc_reclaim();
	if (crp) {
		strhashval = clientstr_hashval(name);
		INIT_LIST_HEAD(&crp->cr_strhash);
		list_add(&crp->cr_strhash, &nn->reclaim_str_hashtbl[strhashval]);
		memcpy(crp->cr_recdir, name, HEXDIR_LEN);
		crp->cr_clp = NULL;
		nn->reclaim_str_hashtbl_size++;
	}
	return crp;
}
void
nfs4_remove_reclaim_record(struct nfs4_client_reclaim *crp, struct nfsd_net *nn)
{
	list_del(&crp->cr_strhash);
	kfree(crp);
	nn->reclaim_str_hashtbl_size--;
}

void
nfs4_release_reclaim(struct nfsd_net *nn)
{
	struct nfs4_client_reclaim *crp = NULL;
	int i;

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		while (!list_empty(&nn->reclaim_str_hashtbl[i])) {
			crp = list_entry(nn->reclaim_str_hashtbl[i].next,
			                struct nfs4_client_reclaim, cr_strhash);
			nfs4_remove_reclaim_record(crp, nn);
		}
	}
	WARN_ON_ONCE(nn->reclaim_str_hashtbl_size);
}
/*
 * called from OPEN, CLAIM_PREVIOUS with a new clientid. */
struct nfs4_client_reclaim *
nfsd4_find_reclaim_client(const char *recdir, struct nfsd_net *nn)
{
	unsigned int strhashval;
	struct nfs4_client_reclaim *crp = NULL;

	dprintk("NFSD: nfs4_find_reclaim_client for recdir %s\n", recdir);

	strhashval = clientstr_hashval(recdir);
	list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) {
		if (same_name(crp->cr_recdir, recdir)) {
			return crp;
		}
	}
	return NULL;
}
/*
 * Called from OPEN. Look for clientid in reclaim list.
 */
__be32
nfs4_check_open_reclaim(clientid_t *clid,
		struct nfsd4_compound_state *cstate,
		struct nfsd_net *nn)
{
	__be32 status;

	/* find clientid in conf_id_hashtbl */
	status = lookup_clientid(clid, cstate, nn);
	if (status)
		return nfserr_reclaim_bad;

	if (test_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &cstate->clp->cl_flags))
		return nfserr_no_grace;

	if (nfsd4_client_record_check(cstate->clp))
		return nfserr_reclaim_bad;

	return nfs_ok;
}
#ifdef CONFIG_NFSD_FAULT_INJECTION
static inline void
put_client(struct nfs4_client *clp)
{
	atomic_dec(&clp->cl_refcount);
}

static struct nfs4_client *
nfsd_find_client(struct sockaddr_storage *addr, size_t addr_size)
{
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
					  nfsd_net_id);

	if (!nfsd_netns_ready(nn))
		return NULL;

	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
		if (memcmp(&clp->cl_addr, addr, addr_size) == 0)
			return clp;
	}
	return NULL;
}

u64
nfsd_inject_print_clients(void)
{
	struct nfs4_client *clp;
	u64 count = 0;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
					  nfsd_net_id);
	char buf[INET6_ADDRSTRLEN];

	if (!nfsd_netns_ready(nn))
		return 0;

	spin_lock(&nn->client_lock);
	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
		rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf));
		pr_info("NFS Client: %s\n", buf);
		++count;
	}
	spin_unlock(&nn->client_lock);

	return count;
}

u64
nfsd_inject_forget_client(struct sockaddr_storage *addr, size_t addr_size)
{
	u64 count = 0;
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
					  nfsd_net_id);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	clp = nfsd_find_client(addr, addr_size);
	if (clp) {
		if (mark_client_expired_locked(clp) == nfs_ok)
			++count;
		else
			clp = NULL;
	}
	spin_unlock(&nn->client_lock);

	if (clp)
		expire_client(clp);

	return count;
}

u64
nfsd_inject_forget_clients(u64 max)
{
	u64 count = 0;
	struct nfs4_client *clp, *next;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) {
		if (mark_client_expired_locked(clp) == nfs_ok) {
			list_add(&clp->cl_lru, &reaplist);
			if (max != 0 && ++count >= max)
				break;
		}
	}
	spin_unlock(&nn->client_lock);

	list_for_each_entry_safe(clp, next, &reaplist, cl_lru)
		expire_client(clp);

	return count;
}
static void nfsd_print_count(struct nfs4_client *clp, unsigned int count,
			     const char *type)
{
	char buf[INET6_ADDRSTRLEN];
	rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf));
	printk(KERN_INFO "NFS Client: %s has %u %s\n", buf, count, type);
}

static void
nfsd_inject_add_lock_to_list(struct nfs4_ol_stateid *lst,
			     struct list_head *collect)
{
	struct nfs4_client *clp = lst->st_stid.sc_client;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
					  nfsd_net_id);

	if (!collect)
		return;

	lockdep_assert_held(&nn->client_lock);
	atomic_inc(&clp->cl_refcount);
	list_add(&lst->st_locks, collect);
}

static u64 nfsd_foreach_client_lock(struct nfs4_client *clp, u64 max,
				    struct list_head *collect,
				    bool (*func)(struct nfs4_ol_stateid *))
{
	struct nfs4_openowner *oop;
	struct nfs4_ol_stateid *stp, *st_next;
	struct nfs4_ol_stateid *lst, *lst_next;
	u64 count = 0;

	spin_lock(&clp->cl_lock);
	list_for_each_entry(oop, &clp->cl_openowners, oo_perclient) {
		list_for_each_entry_safe(stp, st_next,
				&oop->oo_owner.so_stateids, st_perstateowner) {
			list_for_each_entry_safe(lst, lst_next,
					&stp->st_locks, st_locks) {
				if (func) {
					if (func(lst))
						nfsd_inject_add_lock_to_list(lst,
									collect);
				}
				++count;
				/*
				 * Despite the fact that these functions deal
				 * with 64-bit integers for "count", we must
				 * ensure that it doesn't blow up the
				 * clp->cl_refcount. Throw a warning if we
				 * start to approach INT_MAX here.
				 */
				WARN_ON_ONCE(count == (INT_MAX / 2));
				if (count == max)
					goto out;
			}
		}
	}
out:
	spin_unlock(&clp->cl_lock);

	return count;
}
static u64
nfsd_collect_client_locks(struct nfs4_client *clp, struct list_head *collect,
			  u64 max)
{
	return nfsd_foreach_client_lock(clp, max, collect, unhash_lock_stateid);
}

static u64
nfsd_print_client_locks(struct nfs4_client *clp)
{
	u64 count = nfsd_foreach_client_lock(clp, 0, NULL, NULL);
	nfsd_print_count(clp, count, "locked files");
	return count;
}

u64
nfsd_inject_print_locks(void)
{
	struct nfs4_client *clp;
	u64 count = 0;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);

	if (!nfsd_netns_ready(nn))
		return 0;

	spin_lock(&nn->client_lock);
	list_for_each_entry(clp, &nn->client_lru, cl_lru)
		count += nfsd_print_client_locks(clp);
	spin_unlock(&nn->client_lock);

	return count;
}

static void
nfsd_reap_locks(struct list_head *reaplist)
{
	struct nfs4_client *clp;
	struct nfs4_ol_stateid *stp, *next;

	list_for_each_entry_safe(stp, next, reaplist, st_locks) {
		list_del_init(&stp->st_locks);
		clp = stp->st_stid.sc_client;
		nfs4_put_stid(&stp->st_stid);
		put_client(clp);
	}
}

u64
nfsd_inject_forget_client_locks(struct sockaddr_storage *addr, size_t addr_size)
{
	unsigned int count = 0;
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	clp = nfsd_find_client(addr, addr_size);
	if (clp)
		count = nfsd_collect_client_locks(clp, &reaplist, 0);
	spin_unlock(&nn->client_lock);
	nfsd_reap_locks(&reaplist);
	return count;
}

u64
nfsd_inject_forget_locks(u64 max)
{
	u64 count = 0;
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
		count += nfsd_collect_client_locks(clp, &reaplist, max - count);
		if (max != 0 && count >= max)
			break;
	}
	spin_unlock(&nn->client_lock);
	nfsd_reap_locks(&reaplist);
	return count;
}
static u64
nfsd_foreach_client_openowner(struct nfs4_client *clp, u64 max,
			      struct list_head *collect,
			      void (*func)(struct nfs4_openowner *))
{
	struct nfs4_openowner *oop, *next;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	u64 count = 0;

	lockdep_assert_held(&nn->client_lock);

	spin_lock(&clp->cl_lock);
	list_for_each_entry_safe(oop, next, &clp->cl_openowners, oo_perclient) {
		if (func) {
			func(oop);
			if (collect) {
				atomic_inc(&clp->cl_refcount);
				list_add(&oop->oo_perclient, collect);
			}
		}
		++count;
		/*
		 * Despite the fact that these functions deal with
		 * 64-bit integers for "count", we must ensure that
		 * it doesn't blow up the clp->cl_refcount. Throw a
		 * warning if we start to approach INT_MAX here.
		 */
		WARN_ON_ONCE(count == (INT_MAX / 2));
		if (count == max)
			break;
	}
	spin_unlock(&clp->cl_lock);

	return count;
}

static u64
nfsd_print_client_openowners(struct nfs4_client *clp)
{
	u64 count = nfsd_foreach_client_openowner(clp, 0, NULL, NULL);

	nfsd_print_count(clp, count, "openowners");
	return count;
}

static u64
nfsd_collect_client_openowners(struct nfs4_client *clp,
			       struct list_head *collect, u64 max)
{
	return nfsd_foreach_client_openowner(clp, max, collect,
						unhash_openowner_locked);
}

u64
nfsd_inject_print_openowners(void)
{
	struct nfs4_client *clp;
	u64 count = 0;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);

	if (!nfsd_netns_ready(nn))
		return 0;

	spin_lock(&nn->client_lock);
	list_for_each_entry(clp, &nn->client_lru, cl_lru)
		count += nfsd_print_client_openowners(clp);
	spin_unlock(&nn->client_lock);

	return count;
}

static void
nfsd_reap_openowners(struct list_head *reaplist)
{
	struct nfs4_client *clp;
	struct nfs4_openowner *oop, *next;

	list_for_each_entry_safe(oop, next, reaplist, oo_perclient) {
		list_del_init(&oop->oo_perclient);
		clp = oop->oo_owner.so_client;
		release_openowner(oop);
		put_client(clp);
	}
}

u64
nfsd_inject_forget_client_openowners(struct sockaddr_storage *addr,
				     size_t addr_size)
{
	unsigned int count = 0;
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	clp = nfsd_find_client(addr, addr_size);
	if (clp)
		count = nfsd_collect_client_openowners(clp, &reaplist, 0);
	spin_unlock(&nn->client_lock);
	nfsd_reap_openowners(&reaplist);
	return count;
}

u64
nfsd_inject_forget_openowners(u64 max)
{
	u64 count = 0;
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
		count += nfsd_collect_client_openowners(clp, &reaplist,
							max - count);
		if (max != 0 && count >= max)
			break;
	}
	spin_unlock(&nn->client_lock);
	nfsd_reap_openowners(&reaplist);
	return count;
}
static u64
nfsd_find_all_delegations(struct nfs4_client *clp, u64 max,
			  struct list_head *victims)
{
	struct nfs4_delegation *dp, *next;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	u64 count = 0;

	lockdep_assert_held(&nn->client_lock);

	spin_lock(&state_lock);
	list_for_each_entry_safe(dp, next, &clp->cl_delegations, dl_perclnt) {
		if (victims) {
			/*
			 * It's not safe to mess with delegations that have a
			 * non-zero dl_time. They might have already been broken
			 * and could be processed by the laundromat outside of
			 * the state_lock. Just leave them be.
			 */
			if (dp->dl_time != 0)
				continue;

			atomic_inc(&clp->cl_refcount);
			WARN_ON(!unhash_delegation_locked(dp));
			list_add(&dp->dl_recall_lru, victims);
		}
		++count;
		/*
		 * Despite the fact that these functions deal with
		 * 64-bit integers for "count", we must ensure that
		 * it doesn't blow up the clp->cl_refcount. Throw a
		 * warning if we start to approach INT_MAX here.
		 */
		WARN_ON_ONCE(count == (INT_MAX / 2));
		if (count == max)
			break;
	}
	spin_unlock(&state_lock);
	return count;
}

static u64
nfsd_print_client_delegations(struct nfs4_client *clp)
{
	u64 count = nfsd_find_all_delegations(clp, 0, NULL);

	nfsd_print_count(clp, count, "delegations");
	return count;
}
u64
nfsd_inject_print_delegations(void)
{
	struct nfs4_client *clp;
	u64 count = 0;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);

	if (!nfsd_netns_ready(nn))
		return 0;

	spin_lock(&nn->client_lock);
	list_for_each_entry(clp, &nn->client_lru, cl_lru)
		count += nfsd_print_client_delegations(clp);
	spin_unlock(&nn->client_lock);

	return count;
}

static void
nfsd_forget_delegations(struct list_head *reaplist)
{
	struct nfs4_client *clp;
	struct nfs4_delegation *dp, *next;

	list_for_each_entry_safe(dp, next, reaplist, dl_recall_lru) {
		list_del_init(&dp->dl_recall_lru);
		clp = dp->dl_stid.sc_client;
		revoke_delegation(dp);
		put_client(clp);
	}
}

u64
nfsd_inject_forget_client_delegations(struct sockaddr_storage *addr,
				      size_t addr_size)
{
	u64 count = 0;
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	clp = nfsd_find_client(addr, addr_size);
	if (clp)
		count = nfsd_find_all_delegations(clp, 0, &reaplist);
	spin_unlock(&nn->client_lock);

	nfsd_forget_delegations(&reaplist);
	return count;
}

u64
nfsd_inject_forget_delegations(u64 max)
{
	u64 count = 0;
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
		count += nfsd_find_all_delegations(clp, max - count, &reaplist);
		if (max != 0 && count >= max)
			break;
	}
	spin_unlock(&nn->client_lock);
	nfsd_forget_delegations(&reaplist);
	return count;
}

static void
nfsd_recall_delegations(struct list_head *reaplist)
{
	struct nfs4_client *clp;
	struct nfs4_delegation *dp, *next;

	list_for_each_entry_safe(dp, next, reaplist, dl_recall_lru) {
		list_del_init(&dp->dl_recall_lru);
		clp = dp->dl_stid.sc_client;
		/*
		 * We skipped all entries that had a zero dl_time before,
		 * so we can now reset the dl_time back to 0. If a delegation
		 * break comes in now, then it won't make any difference since
		 * we're recalling it either way.
		 */
		spin_lock(&state_lock);
		dp->dl_time = 0;
		spin_unlock(&state_lock);
		nfsd_break_one_deleg(dp);
		put_client(clp);
	}
}

u64
nfsd_inject_recall_client_delegations(struct sockaddr_storage *addr,
				      size_t addr_size)
{
	u64 count = 0;
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	clp = nfsd_find_client(addr, addr_size);
	if (clp)
		count = nfsd_find_all_delegations(clp, 0, &reaplist);
	spin_unlock(&nn->client_lock);

	nfsd_recall_delegations(&reaplist);
	return count;
}

u64
nfsd_inject_recall_delegations(u64 max)
{
	u64 count = 0;
	struct nfs4_client *clp, *next;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) {
		count += nfsd_find_all_delegations(clp, max - count, &reaplist);
		if (max != 0 && ++count >= max)
			break;
	}
	spin_unlock(&nn->client_lock);
	nfsd_recall_delegations(&reaplist);
	return count;
}
#endif /* CONFIG_NFSD_FAULT_INJECTION */
/*
 * Since the lifetime of a delegation isn't limited to that of an open, a
 * client may quite reasonably hang on to a delegation as long as it has
 * the inode cached.  This becomes an obvious problem the first time a
 * client's inode cache approaches the size of the server's total memory.
 *
 * For now we avoid this problem by imposing a hard limit on the number
 * of delegations, which varies according to the server's memory size.
 */
static void
set_max_delegations(void)
{
	/*
	 * Allow at most 4 delegations per megabyte of RAM.  Quick
	 * estimates suggest that in the worst case (where every delegation
	 * is for a different inode), a delegation could take about 1.5K,
	 * giving a worst case usage of about 6% of memory.
	 */
	max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT);
}
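
/*
 * Worked example (hypothetical machine): with PAGE_SHIFT == 12 the
 * shift is 20 - 2 - 12 == 6, so 262144 free buffer pages (1 GiB) yield
 * 262144 >> 6 == 4096 delegations -- i.e. 4 per MiB, as intended.
 */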
static int nfs4_state_create_net(struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	int i;

	nn->conf_id_hashtbl = kmalloc(sizeof(struct list_head) *
			CLIENT_HASH_SIZE, GFP_KERNEL);
	if (!nn->conf_id_hashtbl)
		goto err;
	nn->unconf_id_hashtbl = kmalloc(sizeof(struct list_head) *
			CLIENT_HASH_SIZE, GFP_KERNEL);
	if (!nn->unconf_id_hashtbl)
		goto err_unconf_id;
	nn->sessionid_hashtbl = kmalloc(sizeof(struct list_head) *
			SESSION_HASH_SIZE, GFP_KERNEL);
	if (!nn->sessionid_hashtbl)
		goto err_sessionid;

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]);
		INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]);
	}
	for (i = 0; i < SESSION_HASH_SIZE; i++)
		INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]);
	nn->conf_name_tree = RB_ROOT;
	nn->unconf_name_tree = RB_ROOT;
	nn->boot_time = get_seconds();
	nn->grace_ended = false;
	nn->nfsd4_manager.block_opens = true;
	INIT_LIST_HEAD(&nn->nfsd4_manager.list);
	INIT_LIST_HEAD(&nn->client_lru);
	INIT_LIST_HEAD(&nn->close_lru);
	INIT_LIST_HEAD(&nn->del_recall_lru);
	spin_lock_init(&nn->client_lock);

	INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main);
	get_net(net);

	return 0;

err_sessionid:
	kfree(nn->unconf_id_hashtbl);
err_unconf_id:
	kfree(nn->conf_id_hashtbl);
err:
	return -ENOMEM;
}
static void
nfs4_state_destroy_net(struct net *net)
{
	int i;
	struct nfs4_client *clp = NULL;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		while (!list_empty(&nn->conf_id_hashtbl[i])) {
			clp = list_entry(nn->conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
			destroy_client(clp);
		}
	}

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		while (!list_empty(&nn->unconf_id_hashtbl[i])) {
			clp = list_entry(nn->unconf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
			destroy_client(clp);
		}
	}

	kfree(nn->sessionid_hashtbl);
	kfree(nn->unconf_id_hashtbl);
	kfree(nn->conf_id_hashtbl);
	put_net(net);
}
int
nfs4_state_start_net(struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	int ret;

	ret = nfs4_state_create_net(net);
	if (ret)
		return ret;
	locks_start_grace(net, &nn->nfsd4_manager);
	nfsd4_client_tracking_init(net);
	printk(KERN_INFO "NFSD: starting %ld-second grace period (net %p)\n",
	       nn->nfsd4_grace, net);
	/* nfsd4_grace is in seconds; the workqueue wants jiffies. */
	queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ);
	return 0;
}
/* initialization to perform when the nfsd service is started: */
int
nfs4_state_start(void)
{
	int ret;

	ret = set_callback_cred();
	if (ret)
		return ret;

	laundry_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "nfsd4");
	if (laundry_wq == NULL) {
		ret = -ENOMEM;
		goto out_cleanup_cred;
	}
	ret = nfsd4_create_callback_queue();
	if (ret)
		goto out_free_laundry;

	set_max_delegations();
	return 0;

out_free_laundry:
	destroy_workqueue(laundry_wq);
out_cleanup_cred:
	cleanup_callback_cred();
	return ret;
}
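/*
 * The global setup above is mirrored by nfs4_state_shutdown() below
 * (callback queue, laundry workqueue, callback credential), while the
 * per-net state from nfs4_state_start_net() is undone by
 * nfs4_state_shutdown_net(). A sketch of the expected ordering on the
 * usual nfsd start/stop paths:
 *
 *	nfs4_state_start();
 *	nfs4_state_start_net(net);	(once per network namespace)
 *	...
 *	nfs4_state_shutdown_net(net);
 *	nfs4_state_shutdown();
 */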
void
nfs4_state_shutdown_net(struct net *net)
{
	struct nfs4_delegation *dp = NULL;
	struct list_head *pos, *next, reaplist;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	cancel_delayed_work_sync(&nn->laundromat_work);
	locks_end_grace(&nn->nfsd4_manager);

	/* Unhash all delegations under the lock, then reap them outside it. */
	INIT_LIST_HEAD(&reaplist);
	spin_lock(&state_lock);
	list_for_each_safe(pos, next, &nn->del_recall_lru) {
		dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
		WARN_ON(!unhash_delegation_locked(dp));
		list_add(&dp->dl_recall_lru, &reaplist);
	}
	spin_unlock(&state_lock);
	list_for_each_safe(pos, next, &reaplist) {
		dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
		list_del_init(&dp->dl_recall_lru);
		put_clnt_odstate(dp->dl_clnt_odstate);
		nfs4_put_deleg_lease(dp->dl_stid.sc_file);
		nfs4_put_stid(&dp->dl_stid);
	}

	nfsd4_client_tracking_exit(net);
	nfs4_state_destroy_net(net);
}
void
nfs4_state_shutdown(void)
{
	destroy_workqueue(laundry_wq);
	nfsd4_destroy_callback_queue();
	cleanup_callback_cred();
}
static void
get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
{
	if (HAS_STATE_ID(cstate, CURRENT_STATE_ID_FLAG) && CURRENT_STATEID(stateid))
		memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t));
}
static void
put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
{
	if (cstate->minorversion) {
		memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t));
		SET_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
	}
}
void
clear_current_stateid(struct nfsd4_compound_state *cstate)
{
	CLEAR_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
}
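/*
 * RFC 5661 defines a special "current stateid" (seqid 1, all-zero
 * "other" field) that a v4.1 client may send instead of a real stateid;
 * the server substitutes the stateid saved by an earlier operation in
 * the same COMPOUND. A minimal illustration of the round trip through
 * the helpers above, assuming a COMPOUND like
 * { SEQUENCE; PUTFH; OPEN; READ }:
 *
 *	OPEN result -> nfsd4_set_openstateid() -> put_stateid() saves the
 *	open stateid in cstate->current_stateid and sets the flag;
 *
 *	READ args   -> nfsd4_get_readstateid() -> get_stateid() sees
 *	CURRENT_STATEID(&read->rd_stateid) and overwrites the special
 *	value with the saved stateid.
 */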
/*
 * functions to set current state id
 */
void
nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *odp)
{
	put_stateid(cstate, &odp->od_stateid);
}

void
nfsd4_set_openstateid(struct nfsd4_compound_state *cstate, struct nfsd4_open *open)
{
	put_stateid(cstate, &open->op_stateid);
}

void
nfsd4_set_closestateid(struct nfsd4_compound_state *cstate, struct nfsd4_close *close)
{
	put_stateid(cstate, &close->cl_stateid);
}

void
nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate, struct nfsd4_lock *lock)
{
	put_stateid(cstate, &lock->lk_resp_stateid);
}
/*
 * functions to consume current state id
 */
void
nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *odp)
{
	get_stateid(cstate, &odp->od_stateid);
}

void
nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate, struct nfsd4_delegreturn *drp)
{
	get_stateid(cstate, &drp->dr_stateid);
}

void
nfsd4_get_freestateid(struct nfsd4_compound_state *cstate, struct nfsd4_free_stateid *fsp)
{
	get_stateid(cstate, &fsp->fr_stateid);
}

void
nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate, struct nfsd4_setattr *setattr)
{
	get_stateid(cstate, &setattr->sa_stateid);
}

void
nfsd4_get_closestateid(struct nfsd4_compound_state *cstate, struct nfsd4_close *close)
{
	get_stateid(cstate, &close->cl_stateid);
}

void
nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate, struct nfsd4_locku *locku)
{
	get_stateid(cstate, &locku->lu_stateid);
}

void
nfsd4_get_readstateid(struct nfsd4_compound_state *cstate, struct nfsd4_read *read)
{
	get_stateid(cstate, &read->rd_stateid);
}

void
nfsd4_get_writestateid(struct nfsd4_compound_state *cstate, struct nfsd4_write *write)
{
	get_stateid(cstate, &write->wr_stateid);
}