/*
*  Copyright (c) 2001 The Regents of the University of Michigan.
*  All rights reserved.
*
*  Kendrick Smith <kmsmith@umich.edu>
*  Andy Adamson <kandros@umich.edu>
*
*  Redistribution and use in source and binary forms, with or without
*  modification, are permitted provided that the following conditions
*  are met:
*
*  1. Redistributions of source code must retain the above copyright
*     notice, this list of conditions and the following disclaimer.
*  2. Redistributions in binary form must reproduce the above copyright
*     notice, this list of conditions and the following disclaimer in the
*     documentation and/or other materials provided with the distribution.
*  3. Neither the name of the University nor the names of its
*     contributors may be used to endorse or promote products derived
*     from this software without specific prior written permission.
*
*  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
*  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
*  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
*  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
*  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
*  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
*  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
*  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
*  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
*  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
*  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/ratelimit.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/addr.h>
#include <linux/hash.h>
#include "current_stateid.h"
#define NFSDDBG_FACILITY                NFSDDBG_PROC

#define all_ones {{~0,~0},~0}
static const stateid_t one_stateid = {
	.si_generation = ~0,
	.si_opaque = all_ones,
};
static const stateid_t zero_stateid = {
	/* all fields zero */
};
static const stateid_t currentstateid = {
	.si_generation = 1,
};
static const stateid_t close_stateid = {
	.si_generation = 0xffffffffU,
};

static u64 current_sessionid = 1;

#define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
#define ONE_STATEID(stateid) (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
#define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
#define CLOSE_STATEID(stateid) (!memcmp((stateid), &close_stateid, sizeof(stateid_t)))
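/*
 * Note: the four macros above identify the reserved stateid values by raw
 * byte comparison: the protocol-defined all-zero and all-ones stateids,
 * plus the internal markers used for the current and close stateids.
 */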
/* forward declarations */
static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
static void nfs4_free_ol_stateid(struct nfs4_stid *stid);
/*
 * Currently used for the del_recall_lru and file hash table.  In an
 * effort to decrease the scope of the client_mutex, this spinlock may
 * eventually cover more:
 */
static DEFINE_SPINLOCK(state_lock);

/*
 * A waitqueue for all in-progress 4.0 CLOSE operations that are waiting for
 * the refcount on the open stateid to drop.
 */
static DECLARE_WAIT_QUEUE_HEAD(close_wq);

static struct kmem_cache *openowner_slab;
static struct kmem_cache *lockowner_slab;
static struct kmem_cache *file_slab;
static struct kmem_cache *stateid_slab;
static struct kmem_cache *deleg_slab;

static void free_session(struct nfsd4_session *);

static struct nfsd4_callback_ops nfsd4_cb_recall_ops;
static bool is_session_dead(struct nfsd4_session *ses)
{
	return ses->se_flags & NFS4_SESSION_DEAD;
}

static __be32 mark_session_dead_locked(struct nfsd4_session *ses, int ref_held_by_me)
{
	if (atomic_read(&ses->se_ref) > ref_held_by_me)
		return nfserr_jukebox;
	ses->se_flags |= NFS4_SESSION_DEAD;
	return nfs_ok;
}

static bool is_client_expired(struct nfs4_client *clp)
{
	return clp->cl_time == 0;
}

static __be32 get_client_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	if (is_client_expired(clp))
		return nfserr_expired;
	atomic_inc(&clp->cl_refcount);
	return nfs_ok;
}
/* must be called under the client_lock */
static void
renew_client_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	if (is_client_expired(clp)) {
		printk("%s: client (clientid %08x/%08x) already expired\n",
			__func__,
			clp->cl_clientid.cl_boot,
			clp->cl_clientid.cl_id);
		return;
	}

	dprintk("renewing client (clientid %08x/%08x)\n",
			clp->cl_clientid.cl_boot,
			clp->cl_clientid.cl_id);
	list_move_tail(&clp->cl_lru, &nn->client_lru);
	clp->cl_time = get_seconds();
}

static void
renew_client(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	spin_lock(&nn->client_lock);
	renew_client_locked(clp);
	spin_unlock(&nn->client_lock);
}

static void put_client_renew_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	if (!atomic_dec_and_test(&clp->cl_refcount))
		return;
	if (!is_client_expired(clp))
		renew_client_locked(clp);
}

static void put_client_renew(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	if (!atomic_dec_and_lock(&clp->cl_refcount, &nn->client_lock))
		return;
	if (!is_client_expired(clp))
		renew_client_locked(clp);
	spin_unlock(&nn->client_lock);
}
static __be32 nfsd4_get_session_locked(struct nfsd4_session *ses)
{
	__be32 status;

	if (is_session_dead(ses))
		return nfserr_badsession;
	status = get_client_locked(ses->se_client);
	if (status)
		return status;
	atomic_inc(&ses->se_ref);
	return nfs_ok;
}

static void nfsd4_put_session_locked(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	if (atomic_dec_and_test(&ses->se_ref) && is_session_dead(ses))
		free_session(ses);
	put_client_renew_locked(clp);
}

static void nfsd4_put_session(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	spin_lock(&nn->client_lock);
	nfsd4_put_session_locked(ses);
	spin_unlock(&nn->client_lock);
}

static inline struct nfs4_stateowner *
nfs4_get_stateowner(struct nfs4_stateowner *sop)
{
	atomic_inc(&sop->so_count);
	return sop;
}
static int
same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner)
{
	return (sop->so_owner.len == owner->len) &&
		0 == memcmp(sop->so_owner.data, owner->data, owner->len);
}

static struct nfs4_openowner *
find_openstateowner_str_locked(unsigned int hashval, struct nfsd4_open *open,
			struct nfs4_client *clp)
{
	struct nfs4_stateowner *so;

	lockdep_assert_held(&clp->cl_lock);

	list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[hashval],
			    so_strhash) {
		if (!so->so_is_open_owner)
			continue;
		if (same_owner_str(so, &open->op_owner))
			return openowner(nfs4_get_stateowner(so));
	}
	return NULL;
}

static struct nfs4_openowner *
find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open,
			struct nfs4_client *clp)
{
	struct nfs4_openowner *oo;

	spin_lock(&clp->cl_lock);
	oo = find_openstateowner_str_locked(hashval, open, clp);
	spin_unlock(&clp->cl_lock);
	return oo;
}
static unsigned int
opaque_hashval(const void *ptr, int nbytes)
{
	unsigned char *cptr = (unsigned char *) ptr;
	unsigned int x = 0;

	while (nbytes--) {
		x *= 37;
		x += *cptr++;
	}
	return x;
}
static void nfsd4_free_file(struct nfs4_file *f)
{
	kmem_cache_free(file_slab, f);
}

static void
put_nfs4_file(struct nfs4_file *fi)
{
	might_lock(&state_lock);

	if (atomic_dec_and_lock(&fi->fi_ref, &state_lock)) {
		hlist_del(&fi->fi_hash);
		spin_unlock(&state_lock);
		nfsd4_free_file(fi);
	}
}

static void
get_nfs4_file(struct nfs4_file *fi)
{
	atomic_inc(&fi->fi_ref);
}

static struct file *
__nfs4_get_fd(struct nfs4_file *f, int oflag)
{
	if (f->fi_fds[oflag])
		return get_file(f->fi_fds[oflag]);
	return NULL;
}

static struct file *
find_writeable_file_locked(struct nfs4_file *f)
{
	struct file *ret;

	lockdep_assert_held(&f->fi_lock);

	ret = __nfs4_get_fd(f, O_WRONLY);
	if (!ret)
		ret = __nfs4_get_fd(f, O_RDWR);
	return ret;
}

static struct file *
find_writeable_file(struct nfs4_file *f)
{
	struct file *ret;

	spin_lock(&f->fi_lock);
	ret = find_writeable_file_locked(f);
	spin_unlock(&f->fi_lock);
	return ret;
}

static struct file *find_readable_file_locked(struct nfs4_file *f)
{
	struct file *ret;

	lockdep_assert_held(&f->fi_lock);

	ret = __nfs4_get_fd(f, O_RDONLY);
	if (!ret)
		ret = __nfs4_get_fd(f, O_RDWR);
	return ret;
}

static struct file *
find_readable_file(struct nfs4_file *f)
{
	struct file *ret;

	spin_lock(&f->fi_lock);
	ret = find_readable_file_locked(f);
	spin_unlock(&f->fi_lock);
	return ret;
}

static struct file *
find_any_file(struct nfs4_file *f)
{
	struct file *ret;

	spin_lock(&f->fi_lock);
	ret = __nfs4_get_fd(f, O_RDWR);
	if (!ret) {
		ret = __nfs4_get_fd(f, O_WRONLY);
		if (!ret)
			ret = __nfs4_get_fd(f, O_RDONLY);
	}
	spin_unlock(&f->fi_lock);
	return ret;
}

static atomic_long_t num_delegations;
unsigned long max_delegations;
/*
 * Open owner state (share locks)
 */

/* hash tables for lock and open owners */
#define OWNER_HASH_BITS              8
#define OWNER_HASH_SIZE             (1 << OWNER_HASH_BITS)
#define OWNER_HASH_MASK             (OWNER_HASH_SIZE - 1)

static unsigned int ownerstr_hashval(struct xdr_netobj *ownername)
{
	unsigned int ret;

	ret = opaque_hashval(ownername->data, ownername->len);
	return ret & OWNER_HASH_MASK;
}

/* hash table for nfs4_file */
#define FILE_HASH_BITS                   8
#define FILE_HASH_SIZE                  (1 << FILE_HASH_BITS)

static unsigned int nfsd_fh_hashval(struct knfsd_fh *fh)
{
	return jhash2(fh->fh_base.fh_pad, XDR_QUADLEN(fh->fh_size), 0);
}

static unsigned int file_hashval(struct knfsd_fh *fh)
{
	return nfsd_fh_hashval(fh) & (FILE_HASH_SIZE - 1);
}

static bool nfsd_fh_match(struct knfsd_fh *fh1, struct knfsd_fh *fh2)
{
	return fh1->fh_size == fh2->fh_size &&
		!memcmp(fh1->fh_base.fh_pad,
			fh2->fh_base.fh_pad,
			fh1->fh_size);
}

static struct hlist_head file_hashtbl[FILE_HASH_SIZE];
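/*
 * Note: file_hashtbl is keyed by a jhash2 of the raw filehandle (see
 * nfsd_fh_hashval/file_hashval above) and, per the comment on state_lock,
 * is currently protected by that spinlock.
 */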
static void
__nfs4_file_get_access(struct nfs4_file *fp, u32 access)
{
	lockdep_assert_held(&fp->fi_lock);

	if (access & NFS4_SHARE_ACCESS_WRITE)
		atomic_inc(&fp->fi_access[O_WRONLY]);
	if (access & NFS4_SHARE_ACCESS_READ)
		atomic_inc(&fp->fi_access[O_RDONLY]);
}

static __be32
nfs4_file_get_access(struct nfs4_file *fp, u32 access)
{
	lockdep_assert_held(&fp->fi_lock);

	/* Does this access mode make sense? */
	if (access & ~NFS4_SHARE_ACCESS_BOTH)
		return nfserr_inval;

	/* Does it conflict with a deny mode already set? */
	if ((access & fp->fi_share_deny) != 0)
		return nfserr_share_denied;

	__nfs4_file_get_access(fp, access);
	return nfs_ok;
}

static __be32 nfs4_file_check_deny(struct nfs4_file *fp, u32 deny)
{
	/* Common case is that there is no deny mode. */
	if (deny) {
		/* Does this deny mode make sense? */
		if (deny & ~NFS4_SHARE_DENY_BOTH)
			return nfserr_inval;

		if ((deny & NFS4_SHARE_DENY_READ) &&
		    atomic_read(&fp->fi_access[O_RDONLY]))
			return nfserr_share_denied;

		if ((deny & NFS4_SHARE_DENY_WRITE) &&
		    atomic_read(&fp->fi_access[O_WRONLY]))
			return nfserr_share_denied;
	}
	return nfs_ok;
}

static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
{
	might_lock(&fp->fi_lock);

	if (atomic_dec_and_lock(&fp->fi_access[oflag], &fp->fi_lock)) {
		struct file *f1 = NULL;
		struct file *f2 = NULL;

		swap(f1, fp->fi_fds[oflag]);
		if (atomic_read(&fp->fi_access[1 - oflag]) == 0)
			swap(f2, fp->fi_fds[O_RDWR]);
		spin_unlock(&fp->fi_lock);
		if (f1)
			fput(f1);
		if (f2)
			fput(f2);
	}
}

static void nfs4_file_put_access(struct nfs4_file *fp, u32 access)
{
	WARN_ON_ONCE(access & ~NFS4_SHARE_ACCESS_BOTH);

	if (access & NFS4_SHARE_ACCESS_WRITE)
		__nfs4_file_put_access(fp, O_WRONLY);
	if (access & NFS4_SHARE_ACCESS_READ)
		__nfs4_file_put_access(fp, O_RDONLY);
}
static struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl,
					 struct kmem_cache *slab)
{
	struct nfs4_stid *stid;
	int new_id;

	stid = kmem_cache_zalloc(slab, GFP_KERNEL);
	if (!stid)
		return NULL;

	idr_preload(GFP_KERNEL);
	spin_lock(&cl->cl_lock);
	new_id = idr_alloc_cyclic(&cl->cl_stateids, stid, 0, 0, GFP_NOWAIT);
	spin_unlock(&cl->cl_lock);
	idr_preload_end();
	if (new_id < 0)
		goto out_free;
	stid->sc_client = cl;
	stid->sc_stateid.si_opaque.so_id = new_id;
	stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
	/* Will be incremented before return to client: */
	atomic_set(&stid->sc_count, 1);

	/*
	 * It shouldn't be a problem to reuse an opaque stateid value.
	 * I don't think it is for 4.1.  But with 4.0 I worry that, for
	 * example, a stray write retransmission could be accepted by
	 * the server when it should have been rejected.  Therefore,
	 * adopt a trick from the sctp code to attempt to maximize the
	 * amount of time until an id is reused, by ensuring they always
	 * "increase" (mod INT_MAX):
	 */
	return stid;
out_free:
	kmem_cache_free(slab, stid);
	return NULL;
}
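/*
 * Note: idr_alloc_cyclic() above is what provides the "always increase
 * (mod INT_MAX)" behaviour described in the comment: ids are handed out
 * cyclically rather than reusing the lowest free id immediately.
 */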
static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp)
{
	struct nfs4_stid *stid;
	struct nfs4_ol_stateid *stp;

	stid = nfs4_alloc_stid(clp, stateid_slab);
	if (!stid)
		return NULL;
	stp = openlockstateid(stid);
	stp->st_stid.sc_free = nfs4_free_ol_stateid;
	return stp;
}

static void nfs4_free_deleg(struct nfs4_stid *stid)
{
	kmem_cache_free(deleg_slab, stid);
	atomic_long_dec(&num_delegations);
}
/*
 * When we recall a delegation, we should be careful not to hand it
 * out again straight away.
 * To ensure this we keep a pair of bloom filters ('new' and 'old')
 * in which the filehandles of recalled delegations are "stored".
 * If a filehandle appears in either filter, a delegation is blocked.
 * When a delegation is recalled, the filehandle is stored in the "new"
 * filter.
 * Every 30 seconds we swap the filters and clear the "new" one,
 * unless both are empty of course.
 *
 * Each filter is 256 bits.  We hash the filehandle to 32bit and use the
 * low 3 bytes as hash-table indices.
 *
 * 'blocked_delegations_lock', which is always taken in block_delegations(),
 * is used to manage concurrent access.  Testing does not need the lock
 * except when swapping the two filters.
 */
static DEFINE_SPINLOCK(blocked_delegations_lock);
static struct bloom_pair {
	int	entries, old_entries;
	time_t	swap_time;
	int	new; /* index into 'set' */
	DECLARE_BITMAP(set[2], 256);
} blocked_delegations;
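/*
 * Note: as with any bloom filter, false positives are possible; the only
 * cost is that a new delegation is withheld until the 30-second filter
 * swap has aged the stale entry away.
 */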
static int delegation_blocked(struct knfsd_fh *fh)
{
	u32 hash;
	struct bloom_pair *bd = &blocked_delegations;

	if (bd->entries == 0)
		return 0;
	if (seconds_since_boot() - bd->swap_time > 30) {
		spin_lock(&blocked_delegations_lock);
		if (seconds_since_boot() - bd->swap_time > 30) {
			bd->entries -= bd->old_entries;
			bd->old_entries = bd->entries;
			memset(bd->set[bd->new], 0,
			       sizeof(bd->set[0]));
			bd->new = 1-bd->new;
			bd->swap_time = seconds_since_boot();
		}
		spin_unlock(&blocked_delegations_lock);
	}
	hash = arch_fast_hash(&fh->fh_base, fh->fh_size, 0);
	if (test_bit(hash&255, bd->set[0]) &&
	    test_bit((hash>>8)&255, bd->set[0]) &&
	    test_bit((hash>>16)&255, bd->set[0]))
		return 1;

	if (test_bit(hash&255, bd->set[1]) &&
	    test_bit((hash>>8)&255, bd->set[1]) &&
	    test_bit((hash>>16)&255, bd->set[1]))
		return 1;

	return 0;
}

static void block_delegations(struct knfsd_fh *fh)
{
	u32 hash;
	struct bloom_pair *bd = &blocked_delegations;

	hash = arch_fast_hash(&fh->fh_base, fh->fh_size, 0);

	spin_lock(&blocked_delegations_lock);
	__set_bit(hash&255, bd->set[bd->new]);
	__set_bit((hash>>8)&255, bd->set[bd->new]);
	__set_bit((hash>>16)&255, bd->set[bd->new]);
	if (bd->entries == 0)
		bd->swap_time = seconds_since_boot();
	bd->entries += 1;
	spin_unlock(&blocked_delegations_lock);
}
static struct nfs4_delegation *
alloc_init_deleg(struct nfs4_client *clp, struct svc_fh *current_fh)
{
	struct nfs4_delegation *dp;
	long n;

	dprintk("NFSD alloc_init_deleg\n");
	n = atomic_long_inc_return(&num_delegations);
	if (n < 0 || n > max_delegations)
		goto out_dec;
	if (delegation_blocked(&current_fh->fh_handle))
		goto out_dec;
	dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab));
	if (dp == NULL)
		goto out_dec;

	dp->dl_stid.sc_free = nfs4_free_deleg;
	/*
	 * delegation seqid's are never incremented.  The 4.1 special
	 * meaning of seqid 0 isn't meaningful, really, but let's avoid
	 * 0 anyway just for consistency and use 1:
	 */
	dp->dl_stid.sc_stateid.si_generation = 1;
	INIT_LIST_HEAD(&dp->dl_perfile);
	INIT_LIST_HEAD(&dp->dl_perclnt);
	INIT_LIST_HEAD(&dp->dl_recall_lru);
	dp->dl_type = NFS4_OPEN_DELEGATE_READ;
	nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client,
		      &nfsd4_cb_recall_ops, NFSPROC4_CLNT_CB_RECALL);
	return dp;
out_dec:
	atomic_long_dec(&num_delegations);
	return NULL;
}
static void
nfs4_put_stid(struct nfs4_stid *s)
{
	struct nfs4_file *fp = s->sc_file;
	struct nfs4_client *clp = s->sc_client;

	might_lock(&clp->cl_lock);

	if (!atomic_dec_and_lock(&s->sc_count, &clp->cl_lock)) {
		wake_up_all(&close_wq);
		return;
	}
	idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
	spin_unlock(&clp->cl_lock);
	s->sc_free(s);
	if (fp)
		put_nfs4_file(fp);
}

static void nfs4_put_deleg_lease(struct nfs4_file *fp)
{
	struct file *filp = NULL;

	spin_lock(&fp->fi_lock);
	if (fp->fi_deleg_file && atomic_dec_and_test(&fp->fi_delegees))
		swap(filp, fp->fi_deleg_file);
	spin_unlock(&fp->fi_lock);

	if (filp) {
		vfs_setlease(filp, F_UNLCK, NULL, NULL);
		fput(filp);
	}
}

static void unhash_stid(struct nfs4_stid *s)
{
	s->sc_type = 0;
}
static void
hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
{
	lockdep_assert_held(&state_lock);
	lockdep_assert_held(&fp->fi_lock);

	atomic_inc(&dp->dl_stid.sc_count);
	dp->dl_stid.sc_type = NFS4_DELEG_STID;
	list_add(&dp->dl_perfile, &fp->fi_delegations);
	list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations);
}

static void
unhash_delegation_locked(struct nfs4_delegation *dp)
{
	struct nfs4_file *fp = dp->dl_stid.sc_file;

	lockdep_assert_held(&state_lock);

	dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID;
	/* Ensure that deleg break won't try to requeue it */
	spin_lock(&fp->fi_lock);
	list_del_init(&dp->dl_perclnt);
	list_del_init(&dp->dl_recall_lru);
	list_del_init(&dp->dl_perfile);
	spin_unlock(&fp->fi_lock);
}

static void destroy_delegation(struct nfs4_delegation *dp)
{
	spin_lock(&state_lock);
	unhash_delegation_locked(dp);
	spin_unlock(&state_lock);
	nfs4_put_deleg_lease(dp->dl_stid.sc_file);
	nfs4_put_stid(&dp->dl_stid);
}

static void revoke_delegation(struct nfs4_delegation *dp)
{
	struct nfs4_client *clp = dp->dl_stid.sc_client;

	WARN_ON(!list_empty(&dp->dl_recall_lru));

	nfs4_put_deleg_lease(dp->dl_stid.sc_file);

	if (clp->cl_minorversion == 0)
		nfs4_put_stid(&dp->dl_stid);
	else {
		dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID;
		spin_lock(&clp->cl_lock);
		list_add(&dp->dl_recall_lru, &clp->cl_revoked);
		spin_unlock(&clp->cl_lock);
	}
}

static unsigned int clientid_hashval(u32 id)
{
	return id & CLIENT_HASH_MASK;
}

static unsigned int clientstr_hashval(const char *name)
{
	return opaque_hashval(name, 8) & CLIENT_HASH_MASK;
}
/*
 * We store the NONE, READ, WRITE, and BOTH bits separately in the
 * st_{access,deny}_bmap field of the stateid, in order to track not
 * only what share bits are currently in force, but also what
 * combinations of share bits previous opens have used.  This allows us
 * to enforce the recommendation of rfc 3530 14.2.19 that the server
 * return an error if the client attempts to downgrade to a combination
 * of share bits not explicable by closing some of its previous opens.
 *
 * XXX: This enforcement is actually incomplete, since we don't keep
 * track of access/deny bit combinations; so, e.g., we allow:
 *
 *	OPEN allow read, deny write
 *	OPEN allow both, deny none
 *	DOWNGRADE allow read, deny none
 *
 * which we should reject.
 */
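/*
 * Bit layout used below: bit i of st_access_bmap/st_deny_bmap records that
 * share mode i has been used, with i = 1 (READ), 2 (WRITE) or 3 (BOTH);
 * hence the loops in this file iterate i from 1 to 3.
 */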
static unsigned int
bmap_to_share_mode(unsigned long bmap) {
	int i;
	unsigned int access = 0;

	for (i = 1; i < 4; i++) {
		if (test_bit(i, &bmap))
			access |= i;
	}
	return access;
}

/* set share access for a given stateid */
static inline void
set_access(u32 access, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << access;

	WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
	stp->st_access_bmap |= mask;
}

/* clear share access for a given stateid */
static inline void
clear_access(u32 access, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << access;

	WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
	stp->st_access_bmap &= ~mask;
}

/* test whether a given stateid has access */
static inline bool
test_access(u32 access, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << access;

	return (bool)(stp->st_access_bmap & mask);
}

/* set share deny for a given stateid */
static inline void
set_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << deny;

	WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
	stp->st_deny_bmap |= mask;
}

/* clear share deny for a given stateid */
static inline void
clear_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << deny;

	WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
	stp->st_deny_bmap &= ~mask;
}

/* test whether a given stateid is denying specific access */
static inline bool
test_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << deny;

	return (bool)(stp->st_deny_bmap & mask);
}

static int nfs4_access_to_omode(u32 access)
{
	switch (access & NFS4_SHARE_ACCESS_BOTH) {
	case NFS4_SHARE_ACCESS_READ:
		return O_RDONLY;
	case NFS4_SHARE_ACCESS_WRITE:
		return O_WRONLY;
	case NFS4_SHARE_ACCESS_BOTH:
		return O_RDWR;
	}
	WARN_ON_ONCE(1);
	return O_RDONLY;
}
/*
 * A stateid that had a deny mode associated with it is being released
 * or downgraded. Recalculate the deny mode on the file.
 */
static void
recalculate_deny_mode(struct nfs4_file *fp)
{
	struct nfs4_ol_stateid *stp;

	spin_lock(&fp->fi_lock);
	fp->fi_share_deny = 0;
	list_for_each_entry(stp, &fp->fi_stateids, st_perfile)
		fp->fi_share_deny |= bmap_to_share_mode(stp->st_deny_bmap);
	spin_unlock(&fp->fi_lock);
}

static void
reset_union_bmap_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	int i;
	bool change = false;

	for (i = 1; i < 4; i++) {
		if ((i & deny) != i) {
			change = true;
			clear_deny(i, stp);
		}
	}

	/* Recalculate per-file deny mode if there was a change */
	if (change)
		recalculate_deny_mode(stp->st_stid.sc_file);
}

/* release all access and file references for a given stateid */
static void
release_all_access(struct nfs4_ol_stateid *stp)
{
	int i;
	struct nfs4_file *fp = stp->st_stid.sc_file;

	if (fp && stp->st_deny_bmap != 0)
		recalculate_deny_mode(fp);

	for (i = 1; i < 4; i++) {
		if (test_access(i, stp))
			nfs4_file_put_access(stp->st_stid.sc_file, i);
		clear_access(i, stp);
	}
}
static void nfs4_put_stateowner(struct nfs4_stateowner *sop)
{
	struct nfs4_client *clp = sop->so_client;

	might_lock(&clp->cl_lock);

	if (!atomic_dec_and_lock(&sop->so_count, &clp->cl_lock))
		return;
	sop->so_ops->so_unhash(sop);
	spin_unlock(&clp->cl_lock);
	kfree(sop->so_owner.data);
	sop->so_ops->so_free(sop);
}

static void unhash_ol_stateid(struct nfs4_ol_stateid *stp)
{
	struct nfs4_file *fp = stp->st_stid.sc_file;

	lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock);

	spin_lock(&fp->fi_lock);
	list_del(&stp->st_perfile);
	spin_unlock(&fp->fi_lock);
	list_del(&stp->st_perstateowner);
}

static void nfs4_free_ol_stateid(struct nfs4_stid *stid)
{
	struct nfs4_ol_stateid *stp = openlockstateid(stid);

	release_all_access(stp);
	if (stp->st_stateowner)
		nfs4_put_stateowner(stp->st_stateowner);
	kmem_cache_free(stateid_slab, stid);
}

static void nfs4_free_lock_stateid(struct nfs4_stid *stid)
{
	struct nfs4_ol_stateid *stp = openlockstateid(stid);
	struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
	struct file *file;

	file = find_any_file(stp->st_stid.sc_file);
	if (file)
		filp_close(file, (fl_owner_t)lo);
	nfs4_free_ol_stateid(stid);
}

/*
 * Put the persistent reference to an already unhashed generic stateid, while
 * holding the cl_lock. If it's the last reference, then put it onto the
 * reaplist for later destruction.
 */
static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,
				       struct list_head *reaplist)
{
	struct nfs4_stid *s = &stp->st_stid;
	struct nfs4_client *clp = s->sc_client;

	lockdep_assert_held(&clp->cl_lock);

	WARN_ON_ONCE(!list_empty(&stp->st_locks));

	if (!atomic_dec_and_test(&s->sc_count)) {
		wake_up_all(&close_wq);
		return;
	}
	idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
	list_add(&stp->st_locks, reaplist);
}

static void unhash_lock_stateid(struct nfs4_ol_stateid *stp)
{
	struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner);

	lockdep_assert_held(&oo->oo_owner.so_client->cl_lock);

	list_del_init(&stp->st_locks);
	unhash_ol_stateid(stp);
	unhash_stid(&stp->st_stid);
}

static void release_lock_stateid(struct nfs4_ol_stateid *stp)
{
	struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner);

	spin_lock(&oo->oo_owner.so_client->cl_lock);
	unhash_lock_stateid(stp);
	spin_unlock(&oo->oo_owner.so_client->cl_lock);
	nfs4_put_stid(&stp->st_stid);
}

static void unhash_lockowner_locked(struct nfs4_lockowner *lo)
{
	struct nfs4_client *clp = lo->lo_owner.so_client;

	lockdep_assert_held(&clp->cl_lock);

	list_del_init(&lo->lo_owner.so_strhash);
}
/*
 * Free a list of generic stateids that were collected earlier after being
 * fully unhashed:
 */
static void
free_ol_stateid_reaplist(struct list_head *reaplist)
{
	struct nfs4_ol_stateid *stp;
	struct nfs4_file *fp;

	might_sleep();

	while (!list_empty(reaplist)) {
		stp = list_first_entry(reaplist, struct nfs4_ol_stateid,
				       st_locks);
		list_del(&stp->st_locks);
		fp = stp->st_stid.sc_file;
		stp->st_stid.sc_free(&stp->st_stid);
		if (fp)
			put_nfs4_file(fp);
	}
}

static void release_lockowner(struct nfs4_lockowner *lo)
{
	struct nfs4_client *clp = lo->lo_owner.so_client;
	struct nfs4_ol_stateid *stp;
	struct list_head reaplist;

	INIT_LIST_HEAD(&reaplist);

	spin_lock(&clp->cl_lock);
	unhash_lockowner_locked(lo);
	while (!list_empty(&lo->lo_owner.so_stateids)) {
		stp = list_first_entry(&lo->lo_owner.so_stateids,
				struct nfs4_ol_stateid, st_perstateowner);
		unhash_lock_stateid(stp);
		put_ol_stateid_locked(stp, &reaplist);
	}
	spin_unlock(&clp->cl_lock);
	free_ol_stateid_reaplist(&reaplist);
	nfs4_put_stateowner(&lo->lo_owner);
}

static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp,
				       struct list_head *reaplist)
{
	struct nfs4_ol_stateid *stp;

	while (!list_empty(&open_stp->st_locks)) {
		stp = list_entry(open_stp->st_locks.next,
				struct nfs4_ol_stateid, st_locks);
		unhash_lock_stateid(stp);
		put_ol_stateid_locked(stp, reaplist);
	}
}

static void unhash_open_stateid(struct nfs4_ol_stateid *stp,
				struct list_head *reaplist)
{
	lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);

	unhash_ol_stateid(stp);
	release_open_stateid_locks(stp, reaplist);
}

static void release_open_stateid(struct nfs4_ol_stateid *stp)
{
	LIST_HEAD(reaplist);

	spin_lock(&stp->st_stid.sc_client->cl_lock);
	unhash_open_stateid(stp, &reaplist);
	put_ol_stateid_locked(stp, &reaplist);
	spin_unlock(&stp->st_stid.sc_client->cl_lock);
	free_ol_stateid_reaplist(&reaplist);
}

static void unhash_openowner_locked(struct nfs4_openowner *oo)
{
	struct nfs4_client *clp = oo->oo_owner.so_client;

	lockdep_assert_held(&clp->cl_lock);

	list_del_init(&oo->oo_owner.so_strhash);
	list_del_init(&oo->oo_perclient);
}

static void release_last_closed_stateid(struct nfs4_openowner *oo)
{
	struct nfsd_net *nn = net_generic(oo->oo_owner.so_client->net,
					  nfsd_net_id);
	struct nfs4_ol_stateid *s;

	spin_lock(&nn->client_lock);
	s = oo->oo_last_closed_stid;
	if (s) {
		list_del_init(&oo->oo_close_lru);
		oo->oo_last_closed_stid = NULL;
	}
	spin_unlock(&nn->client_lock);
	if (s)
		nfs4_put_stid(&s->st_stid);
}

static void release_openowner(struct nfs4_openowner *oo)
{
	struct nfs4_ol_stateid *stp;
	struct nfs4_client *clp = oo->oo_owner.so_client;
	struct list_head reaplist;

	INIT_LIST_HEAD(&reaplist);

	spin_lock(&clp->cl_lock);
	unhash_openowner_locked(oo);
	while (!list_empty(&oo->oo_owner.so_stateids)) {
		stp = list_first_entry(&oo->oo_owner.so_stateids,
				struct nfs4_ol_stateid, st_perstateowner);
		unhash_open_stateid(stp, &reaplist);
		put_ol_stateid_locked(stp, &reaplist);
	}
	spin_unlock(&clp->cl_lock);
	free_ol_stateid_reaplist(&reaplist);
	release_last_closed_stateid(oo);
	nfs4_put_stateowner(&oo->oo_owner);
}
static inline int
hash_sessionid(struct nfs4_sessionid *sessionid)
{
	struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid;

	return sid->sequence % SESSION_HASH_SIZE;
}

#ifdef NFSD_DEBUG
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
	u32 *ptr = (u32 *)(&sessionid->data[0]);
	dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]);
}
#else
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
}
#endif
/*
 * Bump the seqid on cstate->replay_owner, and clear replay_owner if it
 * won't be used for replay.
 */
void nfsd4_bump_seqid(struct nfsd4_compound_state *cstate, __be32 nfserr)
{
	struct nfs4_stateowner *so = cstate->replay_owner;

	if (nfserr == nfserr_replay_me)
		return;

	if (!seqid_mutating_err(ntohl(nfserr))) {
		nfsd4_cstate_clear_replay(cstate);
		return;
	}
	if (!so)
		return;
	if (so->so_is_open_owner)
		release_last_closed_stateid(openowner(so));
	so->so_seqid++;
}

static void
gen_sessionid(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd4_sessionid *sid;

	sid = (struct nfsd4_sessionid *)ses->se_sessionid.data;
	sid->clientid = clp->cl_clientid;
	sid->sequence = current_sessionid++;
	sid->reserved = 0;
}
/*
 * The protocol defines ca_maxresponsesize_cached to include the size of
 * the rpc header, but all we need to cache is the data starting after
 * the end of the initial SEQUENCE operation--the rest we regenerate
 * each time.  Therefore we can advertise a ca_maxresponsesize_cached
 * value that is the number of bytes in our cache plus a few additional
 * bytes.  In order to stay on the safe side, and not promise more than
 * we can cache, those additional bytes must be the minimum possible: 24
 * bytes of rpc header (xid through accept state, with AUTH_NULL
 * verifier), 12 for the compound header (with zero-length tag), and 44
 * for the SEQUENCE op response:
 */
#define NFSD_MIN_HDR_SEQ_SZ  (24 + 12 + 44)
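/* 24 + 12 + 44 = 80 bytes of per-reply header that is never cached. */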
static void
free_session_slots(struct nfsd4_session *ses)
{
	int i;

	for (i = 0; i < ses->se_fchannel.maxreqs; i++)
		kfree(ses->se_slots[i]);
}

/*
 * We don't actually need to cache the rpc and session headers, so we
 * can allocate a little less for each slot:
 */
static inline u32 slot_bytes(struct nfsd4_channel_attrs *ca)
{
	u32 size;

	if (ca->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ)
		size = 0;
	else
		size = ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
	return size + sizeof(struct nfsd4_slot);
}

/*
 * XXX: If we run out of reserved DRC memory we could (up to a point)
 * re-negotiate active sessions and reduce their slot usage to make
 * room for new connections. For now we just fail the create session.
 */
static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca)
{
	u32 slotsize = slot_bytes(ca);
	u32 num = ca->maxreqs;
	int avail;

	spin_lock(&nfsd_drc_lock);
	avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION,
		    nfsd_drc_max_mem - nfsd_drc_mem_used);
	num = min_t(int, num, avail / slotsize);
	nfsd_drc_mem_used += num * slotsize;
	spin_unlock(&nfsd_drc_lock);

	return num;
}

static void nfsd4_put_drc_mem(struct nfsd4_channel_attrs *ca)
{
	int slotsize = slot_bytes(ca);

	spin_lock(&nfsd_drc_lock);
	nfsd_drc_mem_used -= slotsize * ca->maxreqs;
	spin_unlock(&nfsd_drc_lock);
}
static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs,
					   struct nfsd4_channel_attrs *battrs)
{
	int numslots = fattrs->maxreqs;
	int slotsize = slot_bytes(fattrs);
	struct nfsd4_session *new;
	int mem, i;

	BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot *)
			+ sizeof(struct nfsd4_session) > PAGE_SIZE);
	mem = numslots * sizeof(struct nfsd4_slot *);

	new = kzalloc(sizeof(*new) + mem, GFP_KERNEL);
	if (!new)
		return NULL;
	/* allocate each struct nfsd4_slot and data cache in one piece */
	for (i = 0; i < numslots; i++) {
		new->se_slots[i] = kzalloc(slotsize, GFP_KERNEL);
		if (!new->se_slots[i])
			goto out_free;
	}

	memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs));
	memcpy(&new->se_bchannel, battrs, sizeof(struct nfsd4_channel_attrs));

	return new;
out_free:
	while (i--)
		kfree(new->se_slots[i]);
	kfree(new);
	return NULL;
}

static void free_conn(struct nfsd4_conn *c)
{
	svc_xprt_put(c->cn_xprt);
	kfree(c);
}
static void nfsd4_conn_lost(struct svc_xpt_user *u)
{
	struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user);
	struct nfs4_client *clp = c->cn_session->se_client;

	spin_lock(&clp->cl_lock);
	if (!list_empty(&c->cn_persession)) {
		list_del(&c->cn_persession);
		free_conn(c);
	}
	nfsd4_probe_callback(clp);
	spin_unlock(&clp->cl_lock);
}

static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
{
	struct nfsd4_conn *conn;

	conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL);
	if (!conn)
		return NULL;
	svc_xprt_get(rqstp->rq_xprt);
	conn->cn_xprt = rqstp->rq_xprt;
	conn->cn_flags = flags;
	INIT_LIST_HEAD(&conn->cn_xpt_user.list);
	return conn;
}

static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	conn->cn_session = ses;
	list_add(&conn->cn_persession, &ses->se_conns);
}

static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;

	spin_lock(&clp->cl_lock);
	__nfsd4_hash_conn(conn, ses);
	spin_unlock(&clp->cl_lock);
}

static int nfsd4_register_conn(struct nfsd4_conn *conn)
{
	conn->cn_xpt_user.callback = nfsd4_conn_lost;
	return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
}

static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	int ret;

	nfsd4_hash_conn(conn, ses);
	ret = nfsd4_register_conn(conn);
	if (ret)
		/* oops; xprt is already down: */
		nfsd4_conn_lost(&conn->cn_xpt_user);
	/* We may have gained or lost a callback channel: */
	nfsd4_probe_callback_sync(ses->se_client);
}

static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses)
{
	u32 dir = NFS4_CDFC4_FORE;

	if (cses->flags & SESSION4_BACK_CHAN)
		dir |= NFS4_CDFC4_BACK;
	return alloc_conn(rqstp, dir);
}
/* must be called under client_lock */
static void nfsd4_del_conns(struct nfsd4_session *s)
{
	struct nfs4_client *clp = s->se_client;
	struct nfsd4_conn *c;

	spin_lock(&clp->cl_lock);
	while (!list_empty(&s->se_conns)) {
		c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession);
		list_del_init(&c->cn_persession);
		spin_unlock(&clp->cl_lock);

		unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user);
		free_conn(c);

		spin_lock(&clp->cl_lock);
	}
	spin_unlock(&clp->cl_lock);
}

static void __free_session(struct nfsd4_session *ses)
{
	free_session_slots(ses);
	kfree(ses);
}

static void free_session(struct nfsd4_session *ses)
{
	nfsd4_del_conns(ses);
	nfsd4_put_drc_mem(&ses->se_fchannel);
	__free_session(ses);
}

static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses)
{
	int idx;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	new->se_client = clp;
	gen_sessionid(new);

	INIT_LIST_HEAD(&new->se_conns);

	new->se_cb_seq_nr = 1;
	new->se_flags = cses->flags;
	new->se_cb_prog = cses->callback_prog;
	new->se_cb_sec = cses->cb_sec;
	atomic_set(&new->se_ref, 0);
	idx = hash_sessionid(&new->se_sessionid);
	list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]);
	spin_lock(&clp->cl_lock);
	list_add(&new->se_perclnt, &clp->cl_sessions);
	spin_unlock(&clp->cl_lock);

	if (cses->flags & SESSION4_BACK_CHAN) {
		struct sockaddr *sa = svc_addr(rqstp);
		/*
		 * This is a little silly; with sessions there's no real
		 * use for the callback address.  Use the peer address
		 * as a reasonable default for now, but consider fixing
		 * the rpc client not to require an address in the
		 * future:
		 */
		rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
		clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
	}
}
/* caller must hold client_lock */
static struct nfsd4_session *
__find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net)
{
	struct nfsd4_session *elem;
	int idx;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	dump_sessionid(__func__, sessionid);
	idx = hash_sessionid(sessionid);
	/* Search in the appropriate list */
	list_for_each_entry(elem, &nn->sessionid_hashtbl[idx], se_hash) {
		if (!memcmp(elem->se_sessionid.data, sessionid->data,
			    NFS4_MAX_SESSIONID_LEN)) {
			return elem;
		}
	}

	dprintk("%s: session not found\n", __func__);
	return NULL;
}

static struct nfsd4_session *
find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net,
		__be32 *ret)
{
	struct nfsd4_session *session;
	__be32 status = nfserr_badsession;

	session = __find_in_sessionid_hashtbl(sessionid, net);
	if (!session)
		goto out;
	status = nfsd4_get_session_locked(session);
	if (status)
		session = NULL;
out:
	*ret = status;
	return session;
}

/* caller must hold client_lock */
static void
unhash_session(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	list_del(&ses->se_hash);
	spin_lock(&ses->se_client->cl_lock);
	list_del(&ses->se_perclnt);
	spin_unlock(&ses->se_client->cl_lock);
}

/* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
static bool
STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn)
{
	if (clid->cl_boot == nn->boot_time)
		return false;
	dprintk("NFSD stale clientid (%08x/%08x) boot_time %08lx\n",
		clid->cl_boot, clid->cl_id, nn->boot_time);
	return true;
}
/*
 * XXX Should we use a slab cache ?
 * This type of memory management is somewhat inefficient, but we use it
 * anyway since SETCLIENTID is not a common operation.
 */
static struct nfs4_client *alloc_client(struct xdr_netobj name)
{
	struct nfs4_client *clp;
	int i;

	clp = kzalloc(sizeof(struct nfs4_client), GFP_KERNEL);
	if (clp == NULL)
		return NULL;
	clp->cl_name.data = kmemdup(name.data, name.len, GFP_KERNEL);
	if (clp->cl_name.data == NULL)
		goto err_no_name;
	clp->cl_ownerstr_hashtbl = kmalloc(sizeof(struct list_head) *
			OWNER_HASH_SIZE, GFP_KERNEL);
	if (!clp->cl_ownerstr_hashtbl)
		goto err_no_hashtbl;
	for (i = 0; i < OWNER_HASH_SIZE; i++)
		INIT_LIST_HEAD(&clp->cl_ownerstr_hashtbl[i]);
	clp->cl_name.len = name.len;
	INIT_LIST_HEAD(&clp->cl_sessions);
	idr_init(&clp->cl_stateids);
	atomic_set(&clp->cl_refcount, 0);
	clp->cl_cb_state = NFSD4_CB_UNKNOWN;
	INIT_LIST_HEAD(&clp->cl_idhash);
	INIT_LIST_HEAD(&clp->cl_openowners);
	INIT_LIST_HEAD(&clp->cl_delegations);
	INIT_LIST_HEAD(&clp->cl_lru);
	INIT_LIST_HEAD(&clp->cl_callbacks);
	INIT_LIST_HEAD(&clp->cl_revoked);
	spin_lock_init(&clp->cl_lock);
	rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
	return clp;
err_no_hashtbl:
	kfree(clp->cl_name.data);
err_no_name:
	kfree(clp);
	return NULL;
}

static void
free_client(struct nfs4_client *clp)
{
	while (!list_empty(&clp->cl_sessions)) {
		struct nfsd4_session *ses;
		ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
				se_perclnt);
		list_del(&ses->se_perclnt);
		WARN_ON_ONCE(atomic_read(&ses->se_ref));
		free_session(ses);
	}
	rpc_destroy_wait_queue(&clp->cl_cb_waitq);
	free_svc_cred(&clp->cl_cred);
	kfree(clp->cl_ownerstr_hashtbl);
	kfree(clp->cl_name.data);
	idr_destroy(&clp->cl_stateids);
	kfree(clp);
}
/* must be called under the client_lock */
static void
unhash_client_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
	struct nfsd4_session *ses;

	lockdep_assert_held(&nn->client_lock);

	/* Mark the client as expired! */
	clp->cl_time = 0;
	/* Make it invisible */
	if (!list_empty(&clp->cl_idhash)) {
		list_del_init(&clp->cl_idhash);
		if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
			rb_erase(&clp->cl_namenode, &nn->conf_name_tree);
		else
			rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
	}
	list_del_init(&clp->cl_lru);
	spin_lock(&clp->cl_lock);
	list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
		list_del_init(&ses->se_hash);
	spin_unlock(&clp->cl_lock);
}

static void
unhash_client(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	spin_lock(&nn->client_lock);
	unhash_client_locked(clp);
	spin_unlock(&nn->client_lock);
}

static __be32 mark_client_expired_locked(struct nfs4_client *clp)
{
	if (atomic_read(&clp->cl_refcount))
		return nfserr_jukebox;
	unhash_client_locked(clp);
	return nfs_ok;
}
static void
__destroy_client(struct nfs4_client *clp)
{
	struct nfs4_openowner *oo;
	struct nfs4_delegation *dp;
	struct list_head reaplist;

	INIT_LIST_HEAD(&reaplist);
	spin_lock(&state_lock);
	while (!list_empty(&clp->cl_delegations)) {
		dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
		unhash_delegation_locked(dp);
		list_add(&dp->dl_recall_lru, &reaplist);
	}
	spin_unlock(&state_lock);
	while (!list_empty(&reaplist)) {
		dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
		list_del_init(&dp->dl_recall_lru);
		nfs4_put_deleg_lease(dp->dl_stid.sc_file);
		nfs4_put_stid(&dp->dl_stid);
	}
	while (!list_empty(&clp->cl_revoked)) {
		dp = list_entry(clp->cl_revoked.next, struct nfs4_delegation, dl_recall_lru);
		list_del_init(&dp->dl_recall_lru);
		nfs4_put_stid(&dp->dl_stid);
	}
	while (!list_empty(&clp->cl_openowners)) {
		oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
		nfs4_get_stateowner(&oo->oo_owner);
		release_openowner(oo);
	}
	nfsd4_shutdown_callback(clp);
	if (clp->cl_cb_conn.cb_xprt)
		svc_xprt_put(clp->cl_cb_conn.cb_xprt);
	free_client(clp);
}

static void
destroy_client(struct nfs4_client *clp)
{
	unhash_client(clp);
	__destroy_client(clp);
}

static void expire_client(struct nfs4_client *clp)
{
	unhash_client(clp);
	nfsd4_client_record_remove(clp);
	__destroy_client(clp);
}
static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
{
	memcpy(target->cl_verifier.data, source->data,
			sizeof(target->cl_verifier.data));
}

static void copy_clid(struct nfs4_client *target, struct nfs4_client *source)
{
	target->cl_clientid.cl_boot = source->cl_clientid.cl_boot;
	target->cl_clientid.cl_id = source->cl_clientid.cl_id;
}

static int copy_cred(struct svc_cred *target, struct svc_cred *source)
{
	if (source->cr_principal) {
		target->cr_principal =
				kstrdup(source->cr_principal, GFP_KERNEL);
		if (target->cr_principal == NULL)
			return -ENOMEM;
	} else
		target->cr_principal = NULL;
	target->cr_flavor = source->cr_flavor;
	target->cr_uid = source->cr_uid;
	target->cr_gid = source->cr_gid;
	target->cr_group_info = source->cr_group_info;
	get_group_info(target->cr_group_info);
	target->cr_gss_mech = source->cr_gss_mech;
	if (source->cr_gss_mech)
		gss_mech_get(source->cr_gss_mech);
	return 0;
}
static int
compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2)
{
	if (o1->len < o2->len)
		return -1;
	if (o1->len > o2->len)
		return 1;
	return memcmp(o1->data, o2->data, o1->len);
}

static int same_name(const char *n1, const char *n2)
{
	return 0 == memcmp(n1, n2, HEXDIR_LEN);
}

static int
same_verf(nfs4_verifier *v1, nfs4_verifier *v2)
{
	return 0 == memcmp(v1->data, v2->data, sizeof(v1->data));
}

static int
same_clid(clientid_t *cl1, clientid_t *cl2)
{
	return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id);
}

static bool groups_equal(struct group_info *g1, struct group_info *g2)
{
	int i;

	if (g1->ngroups != g2->ngroups)
		return false;
	for (i = 0; i < g1->ngroups; i++)
		if (!gid_eq(GROUP_AT(g1, i), GROUP_AT(g2, i)))
			return false;
	return true;
}
/*
 * RFC 3530 language requires clid_inuse be returned when the
 * "principal" associated with a request differs from that previously
 * used.  We use uid, gid's, and gss principal string as our best
 * approximation.  We also don't want to allow non-gss use of a client
 * established using gss: in theory cr_principal should catch that
 * change, but in practice cr_principal can be null even in the gss case
 * since gssd doesn't always pass down a principal string.
 */
static bool is_gss_cred(struct svc_cred *cr)
{
	/* Is cr_flavor one of the gss "pseudoflavors"?: */
	return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR);
}
static bool
same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
{
	if ((is_gss_cred(cr1) != is_gss_cred(cr2))
		|| (!uid_eq(cr1->cr_uid, cr2->cr_uid))
		|| (!gid_eq(cr1->cr_gid, cr2->cr_gid))
		|| !groups_equal(cr1->cr_group_info, cr2->cr_group_info))
		return false;
	if (cr1->cr_principal == cr2->cr_principal)
		return true;
	if (!cr1->cr_principal || !cr2->cr_principal)
		return false;
	return 0 == strcmp(cr1->cr_principal, cr2->cr_principal);
}

static bool svc_rqst_integrity_protected(struct svc_rqst *rqstp)
{
	struct svc_cred *cr = &rqstp->rq_cred;
	u32 service;

	if (!cr->cr_gss_mech)
		return false;
	service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor);
	return service == RPC_GSS_SVC_INTEGRITY ||
	       service == RPC_GSS_SVC_PRIVACY;
}

static bool mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp)
{
	struct svc_cred *cr = &rqstp->rq_cred;

	if (!cl->cl_mach_cred)
		return true;
	if (cl->cl_cred.cr_gss_mech != cr->cr_gss_mech)
		return false;
	if (!svc_rqst_integrity_protected(rqstp))
		return false;
	if (!cr->cr_principal)
		return false;
	return 0 == strcmp(cl->cl_cred.cr_principal, cr->cr_principal);
}
static void gen_confirm(struct nfs4_client *clp, struct nfsd_net *nn)
{
	__be32 verf[2];

	/*
	 * This is opaque to client, so no need to byte-swap. Use
	 * __force to keep sparse happy
	 */
	verf[0] = (__force __be32)get_seconds();
	verf[1] = (__force __be32)nn->clientid_counter;
	memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data));
}

static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn)
{
	clp->cl_clientid.cl_boot = nn->boot_time;
	clp->cl_clientid.cl_id = nn->clientid_counter++;
	gen_confirm(clp, nn);
}

static struct nfs4_stid *
find_stateid_locked(struct nfs4_client *cl, stateid_t *t)
{
	struct nfs4_stid *ret;

	ret = idr_find(&cl->cl_stateids, t->si_opaque.so_id);
	if (!ret || !ret->sc_type)
		return NULL;
	return ret;
}

static struct nfs4_stid *
find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask)
{
	struct nfs4_stid *s;

	spin_lock(&cl->cl_lock);
	s = find_stateid_locked(cl, t);
	if (s != NULL) {
		if (typemask & s->sc_type)
			atomic_inc(&s->sc_count);
		else
			s = NULL;
	}
	spin_unlock(&cl->cl_lock);
	return s;
}
static struct nfs4_client *create_client(struct xdr_netobj name,
		struct svc_rqst *rqstp, nfs4_verifier *verf)
{
	struct nfs4_client *clp;
	struct sockaddr *sa = svc_addr(rqstp);
	int ret;
	struct net *net = SVC_NET(rqstp);

	clp = alloc_client(name);
	if (clp == NULL)
		return NULL;

	ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
	if (ret) {
		free_client(clp);
		return NULL;
	}
	nfsd4_init_cb(&clp->cl_cb_null, clp, NULL, NFSPROC4_CLNT_CB_NULL);
	clp->cl_time = get_seconds();
	clear_bit(0, &clp->cl_cb_slot_busy);
	copy_verf(clp, verf);
	rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa);
	clp->cl_cb_session = NULL;
	clp->net = net;
	return clp;
}

static void
add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	struct nfs4_client *clp;

	while (*new) {
		clp = rb_entry(*new, struct nfs4_client, cl_namenode);
		parent = *new;

		if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	rb_link_node(&new_clp->cl_namenode, parent, new);
	rb_insert_color(&new_clp->cl_namenode, root);
}

static struct nfs4_client *
find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root)
{
	int cmp;
	struct rb_node *node = root->rb_node;
	struct nfs4_client *clp;

	while (node) {
		clp = rb_entry(node, struct nfs4_client, cl_namenode);
		cmp = compare_blob(&clp->cl_name, name);
		if (cmp > 0)
			node = node->rb_left;
		else if (cmp < 0)
			node = node->rb_right;
		else
			return clp;
	}
	return NULL;
}
static void
add_to_unconfirmed(struct nfs4_client *clp)
{
	unsigned int idhashval;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
	add_clp_to_name_tree(clp, &nn->unconf_name_tree);
	idhashval = clientid_hashval(clp->cl_clientid.cl_id);
	list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]);
	renew_client_locked(clp);
}

static void
move_to_confirmed(struct nfs4_client *clp)
{
	unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id);
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	dprintk("NFSD: move_to_confirm nfs4_client %p\n", clp);
	list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]);
	rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
	add_clp_to_name_tree(clp, &nn->conf_name_tree);
	set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
	renew_client_locked(clp);
}

static struct nfs4_client *
find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions)
{
	struct nfs4_client *clp;
	unsigned int idhashval = clientid_hashval(clid->cl_id);

	list_for_each_entry(clp, &tbl[idhashval], cl_idhash) {
		if (same_clid(&clp->cl_clientid, clid)) {
			if ((bool)clp->cl_minorversion != sessions)
				return NULL;
			renew_client_locked(clp);
			return clp;
		}
	}
	return NULL;
}

static struct nfs4_client *
find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
{
	struct list_head *tbl = nn->conf_id_hashtbl;

	lockdep_assert_held(&nn->client_lock);
	return find_client_in_id_table(tbl, clid, sessions);
}

static struct nfs4_client *
find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
{
	struct list_head *tbl = nn->unconf_id_hashtbl;

	lockdep_assert_held(&nn->client_lock);
	return find_client_in_id_table(tbl, clid, sessions);
}

static bool clp_used_exchangeid(struct nfs4_client *clp)
{
	return clp->cl_exchange_flags != 0;
}

static struct nfs4_client *
find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
{
	lockdep_assert_held(&nn->client_lock);
	return find_clp_in_name_tree(name, &nn->conf_name_tree);
}

static struct nfs4_client *
find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
{
	lockdep_assert_held(&nn->client_lock);
	return find_clp_in_name_tree(name, &nn->unconf_name_tree);
}
static void
gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp)
{
	struct nfs4_cb_conn *conn = &clp->cl_cb_conn;
	struct sockaddr *sa = svc_addr(rqstp);
	u32 scopeid = rpc_get_scope_id(sa);
	unsigned short expected_family;

	/* Currently, we only support tcp and tcp6 for the callback channel */
	if (se->se_callback_netid_len == 3 &&
	    !memcmp(se->se_callback_netid_val, "tcp", 3))
		expected_family = AF_INET;
	else if (se->se_callback_netid_len == 4 &&
		 !memcmp(se->se_callback_netid_val, "tcp6", 4))
		expected_family = AF_INET6;
	else
		goto out_err;

	conn->cb_addrlen = rpc_uaddr2sockaddr(clp->net, se->se_callback_addr_val,
					      se->se_callback_addr_len,
					      (struct sockaddr *)&conn->cb_addr,
					      sizeof(conn->cb_addr));

	if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family)
		goto out_err;

	if (conn->cb_addr.ss_family == AF_INET6)
		((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid;

	conn->cb_prog = se->se_callback_prog;
	conn->cb_ident = se->se_callback_ident;
	memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen);
	return;
out_err:
	conn->cb_addr.ss_family = AF_UNSPEC;
	conn->cb_addrlen = 0;
	dprintk(KERN_INFO "NFSD: this client (clientid %08x/%08x) "
		"will not receive delegations\n",
		clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id);

	return;
}
/*
 * Cache a reply. nfsd4_check_resp_size() has bounded the cache size.
 */
static void
nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
{
	struct xdr_buf *buf = resp->xdr.buf;
	struct nfsd4_slot *slot = resp->cstate.slot;
	unsigned int base;

	dprintk("--> %s slot %p\n", __func__, slot);

	slot->sl_opcnt = resp->opcnt;
	slot->sl_status = resp->cstate.status;

	slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
	if (nfsd4_not_cached(resp)) {
		slot->sl_datalen = 0;
		return;
	}
	base = resp->cstate.data_offset;
	slot->sl_datalen = buf->len - base;
	if (read_bytes_from_xdr_buf(buf, base, slot->sl_data, slot->sl_datalen))
		WARN("%s: sessions DRC could not cache compound\n", __func__);
	return;
}

/*
 * Encode the replay sequence operation from the slot values.
 * If cachethis is FALSE encode the uncached rep error on the next
 * operation which sets resp->p and increments resp->opcnt for
 * nfs4svc_encode_compoundres.
 */
static __be32
nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
			  struct nfsd4_compoundres *resp)
{
	struct nfsd4_op *op;
	struct nfsd4_slot *slot = resp->cstate.slot;

	/* Encode the replayed sequence operation */
	op = &args->ops[resp->opcnt - 1];
	nfsd4_encode_operation(resp, op);

	/* Return nfserr_retry_uncached_rep in next operation. */
	if (args->opcnt > 1 && !(slot->sl_flags & NFSD4_SLOT_CACHETHIS)) {
		op = &args->ops[resp->opcnt++];
		op->status = nfserr_retry_uncached_rep;
		nfsd4_encode_operation(resp, op);
	}
	return op->status;
}

/*
 * The sequence operation is not cached because we can use the slot and
 * session values.
 */
static __be32
nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
			 struct nfsd4_sequence *seq)
{
	struct nfsd4_slot *slot = resp->cstate.slot;
	struct xdr_stream *xdr = &resp->xdr;
	__be32 *p;
	__be32 status;

	dprintk("--> %s slot %p\n", __func__, slot);

	status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp);
	if (status)
		return status;

	p = xdr_reserve_space(xdr, slot->sl_datalen);
	if (!p) {
		WARN_ON_ONCE(1);
		return nfserr_serverfault;
	}
	xdr_encode_opaque_fixed(p, slot->sl_data, slot->sl_datalen);
	xdr_commit_encode(xdr);

	resp->opcnt = slot->sl_opcnt;
	return slot->sl_status;
}
/*
 * Set the exchange_id flags returned by the server.
 */
static void
nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid)
{
	/* pNFS is not supported */
	new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS;

	/* Referrals are supported, Migration is not. */
	new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER;

	/* set the wire flags to return to client. */
	clid->flags = new->cl_exchange_flags;
}

static bool client_has_state(struct nfs4_client *clp)
{
	/*
	 * Note clp->cl_openowners check isn't quite right: there's no
	 * need to count owners without stateid's.
	 *
	 * Also note we should probably be using this in 4.0 case too.
	 */
	return !list_empty(&clp->cl_openowners)
		|| !list_empty(&clp->cl_delegations)
		|| !list_empty(&clp->cl_sessions);
}
__be32
nfsd4_exchange_id(struct svc_rqst *rqstp,
		  struct nfsd4_compound_state *cstate,
		  struct nfsd4_exchange_id *exid)
{
	struct nfs4_client *conf, *new;
	struct nfs4_client *unconf = NULL;
	__be32 status;
	char			addr_str[INET6_ADDRSTRLEN];
	nfs4_verifier		verf = exid->verifier;
	struct sockaddr		*sa = svc_addr(rqstp);
	bool	update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A;
	struct nfsd_net		*nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	rpc_ntop(sa, addr_str, sizeof(addr_str));
	dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p "
		"ip_addr=%s flags %x, spa_how %d\n",
		__func__, rqstp, exid, exid->clname.len, exid->clname.data,
		addr_str, exid->flags, exid->spa_how);

	if (exid->flags & ~EXCHGID4_FLAG_MASK_A)
		return nfserr_inval;

	switch (exid->spa_how) {
	case SP4_MACH_CRED:
		if (!svc_rqst_integrity_protected(rqstp))
			return nfserr_inval;
	case SP4_NONE:
		break;
	default:				/* checked by xdr code */
		WARN_ON_ONCE(1);
	case SP4_SSV:
		return nfserr_encr_alg_unsupp;
	}

	new = create_client(exid->clname, rqstp, &verf);
	if (new == NULL)
		return nfserr_jukebox;

	/* Cases below refer to rfc 5661 section 18.35.4: */
	spin_lock(&nn->client_lock);
	conf = find_confirmed_client_by_name(&exid->clname, nn);
	if (conf) {
		bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred);
		bool verfs_match = same_verf(&verf, &conf->cl_verifier);

		if (update) {
			if (!clp_used_exchangeid(conf)) { /* buggy client */
				status = nfserr_inval;
				goto out;
			}
			if (!mach_creds_match(conf, rqstp)) {
				status = nfserr_wrong_cred;
				goto out;
			}
			if (!creds_match) { /* case 9 */
				status = nfserr_perm;
				goto out;
			}
			if (!verfs_match) { /* case 8 */
				status = nfserr_not_same;
				goto out;
			}
			/* case 6 */
			exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;
			goto out_copy;
		}
		if (!creds_match) { /* case 3 */
			if (client_has_state(conf)) {
				status = nfserr_clid_inuse;
				goto out;
			}
			goto out_new;
		}
		if (verfs_match) { /* case 2 */
			conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
			goto out_copy;
		}
		/* case 5, client reboot */
		conf = NULL;
		goto out_new;
	}

	if (update) { /* case 7 */
		status = nfserr_noent;
		goto out;
	}

	unconf = find_unconfirmed_client_by_name(&exid->clname, nn);
	if (unconf) /* case 4, possible retry or client restart */
		unhash_client_locked(unconf);

	/* case 1 (normal case) */
out_new:
	if (conf) {
		status = mark_client_expired_locked(conf);
		if (status)
			goto out;
	}
	new->cl_minorversion = cstate->minorversion;
	new->cl_mach_cred = (exid->spa_how == SP4_MACH_CRED);

	gen_clid(new, nn);
	add_to_unconfirmed(new);
	swap(new, conf);
out_copy:
	exid->clientid.cl_boot = conf->cl_clientid.cl_boot;
	exid->clientid.cl_id = conf->cl_clientid.cl_id;

	exid->seqid = conf->cl_cs_slot.sl_seqid + 1;
	nfsd4_set_ex_flags(conf, exid);

	dprintk("nfsd4_exchange_id seqid %d flags %x\n",
		conf->cl_cs_slot.sl_seqid, conf->cl_exchange_flags);
	status = nfs_ok;

out:
	spin_unlock(&nn->client_lock);
	if (new)
		expire_client(new);
	if (unconf)
		expire_client(unconf);
	return status;
}
static __be32
check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
{
	dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,
		slot_seqid);

	/* The slot is in use, and no response has been sent. */
	if (slot_inuse) {
		if (seqid == slot_seqid)
			return nfserr_jukebox;
		else
			return nfserr_seq_misordered;
	}
	/* Note unsigned 32-bit arithmetic handles wraparound: */
	if (likely(seqid == slot_seqid + 1))
		return nfs_ok;
	if (seqid == slot_seqid)
		return nfserr_replay_cache;
	return nfserr_seq_misordered;
}

/*
 * Cache the create session result into the create session single DRC
 * slot cache by saving the xdr structure. sl_seqid has been set.
 * Do this for solo or embedded create session operations.
 */
static void
nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
			   struct nfsd4_clid_slot *slot, __be32 nfserr)
{
	slot->sl_status = nfserr;
	memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses));
}

static __be32
nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
			    struct nfsd4_clid_slot *slot)
{
	memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses));
	return slot->sl_status;
}

#define NFSD_MIN_REQ_HDR_SEQ_SZ	((\
			2 * 2 + /* credential,verifier: AUTH_NULL, length 0 */ \
			1 +	/* MIN tag is length with zero, only length */ \
			3 +	/* version, opcount, opcode */ \
			XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
				/* seqid, slotID, slotID, cache */ \
			4 ) * sizeof(__be32))

#define NFSD_MIN_RESP_HDR_SEQ_SZ ((\
			2 +	/* verifier: AUTH_NULL, length 0 */\
			1 +	/* status */ \
			1 +	/* MIN tag is length with zero, only length */ \
			3 +	/* opcount, opcode, opstatus*/ \
			XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
				/* seqid, slotID, slotID, slotID, status */ \
			5 ) * sizeof(__be32))
static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
{
	u32 maxrpc = nn->nfsd_serv->sv_max_mesg;

	if (ca->maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ)
		return nfserr_toosmall;
	if (ca->maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ)
		return nfserr_toosmall;
	ca->headerpadsz = 0;
	ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc);
	ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc);
	ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND);
	ca->maxresp_cached = min_t(u32, ca->maxresp_cached,
			NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ);
	ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION);
	/*
	 * Note decreasing slot size below client's request may make it
	 * difficult for client to function correctly, whereas
	 * decreasing the number of slots will (just?) affect
	 * performance.  When short on memory we therefore prefer to
	 * decrease number of slots instead of their size.  Clients that
	 * request larger slots than they need will get poor results:
	 */
	ca->maxreqs = nfsd4_get_drc_mem(ca);
	if (!ca->maxreqs)
		return nfserr_jukebox;

	return nfs_ok;
}

#define NFSD_CB_MAX_REQ_SZ	((NFS4_enc_cb_recall_sz + \
				 RPC_MAX_HEADER_WITH_AUTH) * sizeof(__be32))
#define NFSD_CB_MAX_RESP_SZ	((NFS4_dec_cb_recall_sz + \
				 RPC_MAX_REPHEADER_WITH_AUTH) * sizeof(__be32))

static __be32 check_backchannel_attrs(struct nfsd4_channel_attrs *ca)
{
	ca->headerpadsz = 0;

	/*
	 * These RPC_MAX_HEADER macros are overkill, especially since we
	 * don't even do gss on the backchannel yet.  But this is still
	 * less than 1k.  Tighten up this estimate in the unlikely event
	 * it turns out to be a problem for some client:
	 */
	if (ca->maxreq_sz < NFSD_CB_MAX_REQ_SZ)
		return nfserr_toosmall;
	if (ca->maxresp_sz < NFSD_CB_MAX_RESP_SZ)
		return nfserr_toosmall;
	ca->maxresp_cached = 0;
	if (ca->maxops < 2)
		return nfserr_toosmall;

	return nfs_ok;
}

static __be32 nfsd4_check_cb_sec(struct nfsd4_cb_sec *cbs)
{
	switch (cbs->flavor) {
	case RPC_AUTH_NULL:
	case RPC_AUTH_UNIX:
		return nfs_ok;
	default:
		/*
		 * GSS case: the spec doesn't allow us to return this
		 * error.  But it also doesn't allow us not to support
		 * GSS.
		 * I'd rather this fail hard than return some error the
		 * client might think it can already handle:
		 */
		return nfserr_encr_alg_unsupp;
	}
}
__be32
nfsd4_create_session(struct svc_rqst *rqstp,
		     struct nfsd4_compound_state *cstate,
		     struct nfsd4_create_session *cr_ses)
{
	struct sockaddr *sa = svc_addr(rqstp);
	struct nfs4_client *conf, *unconf;
	struct nfs4_client *old = NULL;
	struct nfsd4_session *new;
	struct nfsd4_conn *conn;
	struct nfsd4_clid_slot *cs_slot = NULL;
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	if (cr_ses->flags & ~SESSION4_FLAG_MASK_A)
		return nfserr_inval;
	status = nfsd4_check_cb_sec(&cr_ses->cb_sec);
	if (status)
		return status;
	status = check_forechannel_attrs(&cr_ses->fore_channel, nn);
	if (status)
		return status;
	status = check_backchannel_attrs(&cr_ses->back_channel);
	if (status)
		goto out_release_drc_mem;
	status = nfserr_jukebox;
	new = alloc_session(&cr_ses->fore_channel, &cr_ses->back_channel);
	if (!new)
		goto out_release_drc_mem;
	conn = alloc_conn_from_crses(rqstp, cr_ses);
	if (!conn)
		goto out_free_session;

	spin_lock(&nn->client_lock);
	unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn);
	conf = find_confirmed_client(&cr_ses->clientid, true, nn);
	WARN_ON_ONCE(conf && unconf);

	if (conf) {
		status = nfserr_wrong_cred;
		if (!mach_creds_match(conf, rqstp))
			goto out_free_conn;
		cs_slot = &conf->cl_cs_slot;
		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
		if (status == nfserr_replay_cache) {
			status = nfsd4_replay_create_session(cr_ses, cs_slot);
			goto out_free_conn;
		} else if (cr_ses->seqid != cs_slot->sl_seqid + 1) {
			status = nfserr_seq_misordered;
			goto out_free_conn;
		}
	} else if (unconf) {
		if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
		    !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) {
			status = nfserr_clid_inuse;
			goto out_free_conn;
		}
		status = nfserr_wrong_cred;
		if (!mach_creds_match(unconf, rqstp))
			goto out_free_conn;
		cs_slot = &unconf->cl_cs_slot;
		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
		if (status) {
			/* an unconfirmed replay returns misordered */
			status = nfserr_seq_misordered;
			goto out_free_conn;
		}
		old = find_confirmed_client_by_name(&unconf->cl_name, nn);
		if (old) {
			status = mark_client_expired_locked(old);
			if (status) {
				old = NULL;
				goto out_free_conn;
			}
		}
		move_to_confirmed(unconf);
		conf = unconf;
	} else {
		status = nfserr_stale_clientid;
		goto out_free_conn;
	}
	status = nfs_ok;
	/*
	 * We do not support RDMA or persistent sessions
	 */
	cr_ses->flags &= ~SESSION4_PERSIST;
	cr_ses->flags &= ~SESSION4_RDMA;

	init_session(rqstp, new, conf, cr_ses);
	nfsd4_get_session_locked(new);

	memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
	       NFS4_MAX_SESSIONID_LEN);
	cs_slot->sl_seqid++;
	cr_ses->seqid = cs_slot->sl_seqid;

	/* cache solo and embedded create sessions under the client_lock */
	nfsd4_cache_create_session(cr_ses, cs_slot, status);
	spin_unlock(&nn->client_lock);
	/* init connection and backchannel */
	nfsd4_init_conn(rqstp, conn, new);
	nfsd4_put_session(new);
	if (old)
		expire_client(old);
	return status;
out_free_conn:
	spin_unlock(&nn->client_lock);
	free_conn(conn);
	if (old)
		expire_client(old);
out_free_session:
	__free_session(new);
out_release_drc_mem:
	nfsd4_put_drc_mem(&cr_ses->fore_channel);
	return status;
}
static __be32 nfsd4_map_bcts_dir(u32 *dir)
{
	switch (*dir) {
	case NFS4_CDFC4_FORE:
	case NFS4_CDFC4_BACK:
		return nfs_ok;
	case NFS4_CDFC4_FORE_OR_BOTH:
	case NFS4_CDFC4_BACK_OR_BOTH:
		*dir = NFS4_CDFC4_BOTH;
		return nfs_ok;
	};
	return nfserr_inval;
}

__be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_backchannel_ctl *bc)
{
	struct nfsd4_session *session = cstate->session;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
	__be32 status;

	status = nfsd4_check_cb_sec(&bc->bc_cb_sec);
	if (status)
		return status;
	spin_lock(&nn->client_lock);
	session->se_cb_prog = bc->bc_cb_program;
	session->se_cb_sec = bc->bc_cb_sec;
	spin_unlock(&nn->client_lock);

	nfsd4_probe_callback(session->se_client);

	return nfs_ok;
}
__be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
		     struct nfsd4_compound_state *cstate,
		     struct nfsd4_bind_conn_to_session *bcts)
{
	__be32 status;
	struct nfsd4_conn *conn;
	struct nfsd4_session *session;
	struct net *net = SVC_NET(rqstp);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	if (!nfsd4_last_compound_op(rqstp))
		return nfserr_not_only_op;
	spin_lock(&nn->client_lock);
	session = find_in_sessionid_hashtbl(&bcts->sessionid, net, &status);
	spin_unlock(&nn->client_lock);
	if (!session)
		goto out_no_session;
	status = nfserr_wrong_cred;
	if (!mach_creds_match(session->se_client, rqstp))
		goto out;
	status = nfsd4_map_bcts_dir(&bcts->dir);
	if (status)
		goto out;
	conn = alloc_conn(rqstp, bcts->dir);
	status = nfserr_jukebox;
	if (!conn)
		goto out;
	nfsd4_init_conn(rqstp, conn, session);
	status = nfs_ok;
out:
	nfsd4_put_session(session);
out_no_session:
	return status;
}

static bool nfsd4_compound_in_session(struct nfsd4_session *session, struct nfs4_sessionid *sid)
{
	if (!session)
		return false;
	return !memcmp(sid, &session->se_sessionid, sizeof(*sid));
}
__be32
nfsd4_destroy_session(struct svc_rqst *r,
		      struct nfsd4_compound_state *cstate,
		      struct nfsd4_destroy_session *sessionid)
{
	struct nfsd4_session *ses;
	__be32 status;
	int ref_held_by_me = 0;
	struct net *net = SVC_NET(r);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	status = nfserr_not_only_op;
	if (nfsd4_compound_in_session(cstate->session, &sessionid->sessionid)) {
		if (!nfsd4_last_compound_op(r))
			goto out;
		ref_held_by_me++;
	}
	dump_sessionid(__func__, &sessionid->sessionid);
	spin_lock(&nn->client_lock);
	ses = find_in_sessionid_hashtbl(&sessionid->sessionid, net, &status);
	if (!ses)
		goto out_client_lock;
	status = nfserr_wrong_cred;
	if (!mach_creds_match(ses->se_client, r))
		goto out_put_session;
	status = mark_session_dead_locked(ses, 1 + ref_held_by_me);
	if (status)
		goto out_put_session;
	unhash_session(ses);
	spin_unlock(&nn->client_lock);

	nfsd4_probe_callback_sync(ses->se_client);

	spin_lock(&nn->client_lock);
	status = nfs_ok;
out_put_session:
	nfsd4_put_session_locked(ses);
out_client_lock:
	spin_unlock(&nn->client_lock);
out:
	return status;
}
static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
{
	struct nfsd4_conn *c;

	list_for_each_entry(c, &s->se_conns, cn_persession) {
		if (c->cn_xprt == xpt) {
			return c;
		}
	}
	return NULL;
}

static __be32 nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd4_conn *c;
	__be32 status = nfs_ok;
	int ret;

	spin_lock(&clp->cl_lock);
	c = __nfsd4_find_conn(new->cn_xprt, ses);
	if (c)
		goto out_free;
	status = nfserr_conn_not_bound_to_session;
	if (clp->cl_mach_cred)
		goto out_free;
	__nfsd4_hash_conn(new, ses);
	spin_unlock(&clp->cl_lock);
	ret = nfsd4_register_conn(new);
	if (ret)
		/* oops; xprt is already down: */
		nfsd4_conn_lost(&new->cn_xpt_user);
	return nfs_ok;
out_free:
	spin_unlock(&clp->cl_lock);
	free_conn(new);
	return status;
}
static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session)
{
	struct nfsd4_compoundargs *args = rqstp->rq_argp;

	return args->opcnt > session->se_fchannel.maxops;
}

static bool nfsd4_request_too_big(struct svc_rqst *rqstp,
				  struct nfsd4_session *session)
{
	struct xdr_buf *xb = &rqstp->rq_arg;

	return xb->len > session->se_fchannel.maxreq_sz;
}
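/*
 * SEQUENCE: look up the session and slot, detect replays via the slot
 * seqid, bind the connection to the session if needed, and reserve
 * reply space based on whether the reply will be cached.
 */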
__be32
nfsd4_sequence(struct svc_rqst *rqstp,
	       struct nfsd4_compound_state *cstate,
	       struct nfsd4_sequence *seq)
{
	struct nfsd4_compoundres *resp = rqstp->rq_resp;
	struct xdr_stream *xdr = &resp->xdr;
	struct nfsd4_session *session;
	struct nfs4_client *clp;
	struct nfsd4_slot *slot;
	struct nfsd4_conn *conn;
	__be32 status;
	int buflen;
	struct net *net = SVC_NET(rqstp);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	if (resp->opcnt != 1)
		return nfserr_sequence_pos;

	/*
	 * Will be either used or freed by nfsd4_sequence_check_conn
	 * below.
	 */
	conn = alloc_conn(rqstp, NFS4_CDFC4_FORE);
	if (!conn)
		return nfserr_jukebox;

	spin_lock(&nn->client_lock);
	session = find_in_sessionid_hashtbl(&seq->sessionid, net, &status);
	if (!session)
		goto out_no_session;
	clp = session->se_client;

	status = nfserr_too_many_ops;
	if (nfsd4_session_too_many_ops(rqstp, session))
		goto out_put_session;

	status = nfserr_req_too_big;
	if (nfsd4_request_too_big(rqstp, session))
		goto out_put_session;

	status = nfserr_badslot;
	if (seq->slotid >= session->se_fchannel.maxreqs)
		goto out_put_session;

	slot = session->se_slots[seq->slotid];
	dprintk("%s: slotid %d\n", __func__, seq->slotid);

	/* We do not negotiate the number of slots yet, so set the
	 * maxslots to the session maxreqs which is used to encode
	 * sr_highest_slotid and the sr_target_slot id to maxslots */
	seq->maxslots = session->se_fchannel.maxreqs;

	status = check_slot_seqid(seq->seqid, slot->sl_seqid,
					slot->sl_flags & NFSD4_SLOT_INUSE);
	if (status == nfserr_replay_cache) {
		status = nfserr_seq_misordered;
		if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED))
			goto out_put_session;
		cstate->slot = slot;
		cstate->session = session;
		cstate->clp = clp;
		/* Return the cached reply status and set cstate->status
		 * for nfsd4_proc_compound processing */
		status = nfsd4_replay_cache_entry(resp, seq);
		cstate->status = nfserr_replay_cache;
		goto out;
	}
	if (status)
		goto out_put_session;

	status = nfsd4_sequence_check_conn(conn, session);
	conn = NULL;
	if (status)
		goto out_put_session;

	buflen = (seq->cachethis) ?
			session->se_fchannel.maxresp_cached :
			session->se_fchannel.maxresp_sz;
	status = (seq->cachethis) ? nfserr_rep_too_big_to_cache :
				    nfserr_rep_too_big;
	if (xdr_restrict_buflen(xdr, buflen - rqstp->rq_auth_slack))
		goto out_put_session;
	svc_reserve(rqstp, buflen);

	status = nfs_ok;
	/* Success! bump slot seqid */
	slot->sl_seqid = seq->seqid;
	slot->sl_flags |= NFSD4_SLOT_INUSE;
	if (seq->cachethis)
		slot->sl_flags |= NFSD4_SLOT_CACHETHIS;
	else
		slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS;

	cstate->slot = slot;
	cstate->session = session;
	cstate->clp = clp;

out:
	switch (clp->cl_cb_state) {
	case NFSD4_CB_DOWN:
		seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN;
		break;
	case NFSD4_CB_FAULT:
		seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT;
		break;
	default:
		seq->status_flags = 0;
	}
	if (!list_empty(&clp->cl_revoked))
		seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED;
out_no_session:
	if (conn)
		free_conn(conn);
	spin_unlock(&nn->client_lock);
	return status;
out_put_session:
	nfsd4_put_session_locked(session);
	goto out_no_session;
}
void
nfsd4_sequence_done(struct nfsd4_compoundres *resp)
{
	struct nfsd4_compound_state *cs = &resp->cstate;

	if (nfsd4_has_session(cs)) {
		if (cs->status != nfserr_replay_cache) {
			nfsd4_store_cache_entry(resp);
			cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE;
		}
		/* Drop session reference that was taken in nfsd4_sequence() */
		nfsd4_put_session(cs->session);
	} else if (cs->clp)
		put_client_renew(cs->clp);
}
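/*
 * DESTROY_CLIENTID: only succeeds for a confirmed client with no open,
 * delegation or session state, or for an unconfirmed client.
 */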
__be32
nfsd4_destroy_clientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_destroy_clientid *dc)
{
	struct nfs4_client *conf, *unconf;
	struct nfs4_client *clp = NULL;
	__be32 status = 0;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	spin_lock(&nn->client_lock);
	unconf = find_unconfirmed_client(&dc->clientid, true, nn);
	conf = find_confirmed_client(&dc->clientid, true, nn);
	WARN_ON_ONCE(conf && unconf);

	if (conf) {
		if (client_has_state(conf)) {
			status = nfserr_clientid_busy;
			goto out;
		}
		status = mark_client_expired_locked(conf);
		if (status)
			goto out;
		clp = conf;
	} else if (unconf)
		clp = unconf;
	else {
		status = nfserr_stale_clientid;
		goto out;
	}
	if (!mach_creds_match(clp, rqstp)) {
		clp = NULL;
		status = nfserr_wrong_cred;
		goto out;
	}
	unhash_client_locked(clp);
out:
	spin_unlock(&nn->client_lock);
	if (clp)
		expire_client(clp);
	return status;
}
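/*
 * RECLAIM_COMPLETE: record that the client has finished reclaiming
 * state after a reboot; the rca_one_fs variant is accepted but ignored.
 */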
__be32
nfsd4_reclaim_complete(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_reclaim_complete *rc)
{
	__be32 status = 0;

	if (rc->rca_one_fs) {
		if (!cstate->current_fh.fh_dentry)
			return nfserr_nofilehandle;
		/*
		 * We don't take advantage of the rca_one_fs case.
		 * That's OK, it's optional, we can safely ignore it.
		 */
		return nfs_ok;
	}

	status = nfserr_complete_already;
	if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE,
			     &cstate->session->se_client->cl_flags))
		goto out;

	status = nfserr_stale_clientid;
	if (is_client_expired(cstate->session->se_client))
		/*
		 * The following error isn't really legal.
		 * But we only get here if the client just explicitly
		 * destroyed the client.  Surely it no longer cares what
		 * error it gets back on an operation for the dead
		 * client.
		 */
		goto out;

	status = nfs_ok;
	nfsd4_client_record_create(cstate->session->se_client);
out:
	return status;
}
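/*
 * SETCLIENTID (NFSv4.0): the cases referenced below are those of
 * RFC 3530 section 14.2.33; a new unconfirmed record always replaces
 * any existing unconfirmed record for the same name.
 */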
__be32
nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		  struct nfsd4_setclientid *setclid)
{
	struct xdr_netobj clname = setclid->se_name;
	nfs4_verifier clverifier = setclid->se_verf;
	struct nfs4_client *conf, *new;
	struct nfs4_client *unconf = NULL;
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	new = create_client(clname, rqstp, &clverifier);
	if (new == NULL)
		return nfserr_jukebox;
	/* Cases below refer to rfc 3530 section 14.2.33: */
	spin_lock(&nn->client_lock);
	conf = find_confirmed_client_by_name(&clname, nn);
	if (conf) {
		status = nfserr_clid_inuse;
		if (clp_used_exchangeid(conf))
			goto out;
		if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
			char addr_str[INET6_ADDRSTRLEN];
			rpc_ntop((struct sockaddr *) &conf->cl_addr, addr_str,
				 sizeof(addr_str));
			dprintk("NFSD: setclientid: string in use by client "
				"at %s\n", addr_str);
			goto out;
		}
	}
	unconf = find_unconfirmed_client_by_name(&clname, nn);
	if (unconf)
		unhash_client_locked(unconf);
	if (conf && same_verf(&conf->cl_verifier, &clverifier))
		/* case 1: probable callback update */
		copy_clid(new, conf);
	else /* case 4 (new client) or cases 2, 3 (client reboot): */
		gen_clid(new, nn);
	new->cl_minorversion = 0;
	gen_callback(new, setclid, rqstp);
	add_to_unconfirmed(new);
	setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
	setclid->se_clientid.cl_id = new->cl_clientid.cl_id;
	memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data));
	new = NULL;
	status = nfs_ok;
out:
	spin_unlock(&nn->client_lock);
	if (new)
		free_client(new);
	if (unconf)
		expire_client(unconf);
	return status;
}
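/*
 * SETCLIENTID_CONFIRM: confirm a clientid/verifier pair handed out by
 * SETCLIENTID, distinguishing callback updates, retransmits, reboots
 * and new clients per RFC 3530 section 14.2.34.
 */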
__be32
nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
			 struct nfsd4_compound_state *cstate,
			 struct nfsd4_setclientid_confirm *setclientid_confirm)
{
	struct nfs4_client *conf, *unconf;
	struct nfs4_client *old = NULL;
	nfs4_verifier confirm = setclientid_confirm->sc_confirm;
	clientid_t *clid = &setclientid_confirm->sc_clientid;
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	if (STALE_CLIENTID(clid, nn))
		return nfserr_stale_clientid;

	spin_lock(&nn->client_lock);
	conf = find_confirmed_client(clid, false, nn);
	unconf = find_unconfirmed_client(clid, false, nn);
	/*
	 * We try hard to give out unique clientid's, so if we get an
	 * attempt to confirm the same clientid with a different cred,
	 * there's a bug somewhere.  Let's charitably assume it's our
	 * bug.
	 */
	status = nfserr_serverfault;
	if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred))
		goto out;
	if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred))
		goto out;
	/* cases below refer to rfc 3530 section 14.2.34: */
	if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) {
		if (conf && !unconf) /* case 2: probable retransmit */
			status = nfs_ok;
		else /* case 4: client hasn't noticed we rebooted yet? */
			status = nfserr_stale_clientid;
		goto out;
	}
	status = nfs_ok;
	if (conf) { /* case 1: callback update */
		old = unconf;
		unhash_client_locked(old);
		nfsd4_change_callback(conf, &unconf->cl_cb_conn);
	} else { /* case 3: normal case; new or rebooted client */
		old = find_confirmed_client_by_name(&unconf->cl_name, nn);
		if (old) {
			status = mark_client_expired_locked(old);
			if (status) {
				old = NULL;
				goto out;
			}
		}
		move_to_confirmed(unconf);
		conf = unconf;
	}
	get_client_locked(conf);
	spin_unlock(&nn->client_lock);
	nfsd4_probe_callback(conf);
	spin_lock(&nn->client_lock);
	put_client_renew_locked(conf);
out:
	spin_unlock(&nn->client_lock);
	if (old)
		expire_client(old);
	return status;
}
static struct nfs4_file *nfsd4_alloc_file(void)
{
	return kmem_cache_alloc(file_slab, GFP_KERNEL);
}

/* OPEN Share state helper functions */
static void nfsd4_init_file(struct nfs4_file *fp, struct knfsd_fh *fh)
{
	unsigned int hashval = file_hashval(fh);

	lockdep_assert_held(&state_lock);

	atomic_set(&fp->fi_ref, 1);
	spin_lock_init(&fp->fi_lock);
	INIT_LIST_HEAD(&fp->fi_stateids);
	INIT_LIST_HEAD(&fp->fi_delegations);
	fh_copy_shallow(&fp->fi_fhandle, fh);
	fp->fi_deleg_file = NULL;
	fp->fi_had_conflict = false;
	fp->fi_share_deny = 0;
	memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
	memset(fp->fi_access, 0, sizeof(fp->fi_access));
	hlist_add_head(&fp->fi_hash, &file_hashtbl[hashval]);
}
void
nfsd4_free_slabs(void)
{
	kmem_cache_destroy(openowner_slab);
	kmem_cache_destroy(lockowner_slab);
	kmem_cache_destroy(file_slab);
	kmem_cache_destroy(stateid_slab);
	kmem_cache_destroy(deleg_slab);
}

int
nfsd4_init_slabs(void)
{
	openowner_slab = kmem_cache_create("nfsd4_openowners",
			sizeof(struct nfs4_openowner), 0, 0, NULL);
	if (openowner_slab == NULL)
		goto out;
	lockowner_slab = kmem_cache_create("nfsd4_lockowners",
			sizeof(struct nfs4_lockowner), 0, 0, NULL);
	if (lockowner_slab == NULL)
		goto out_free_openowner_slab;
	file_slab = kmem_cache_create("nfsd4_files",
			sizeof(struct nfs4_file), 0, 0, NULL);
	if (file_slab == NULL)
		goto out_free_lockowner_slab;
	stateid_slab = kmem_cache_create("nfsd4_stateids",
			sizeof(struct nfs4_ol_stateid), 0, 0, NULL);
	if (stateid_slab == NULL)
		goto out_free_file_slab;
	deleg_slab = kmem_cache_create("nfsd4_delegations",
			sizeof(struct nfs4_delegation), 0, 0, NULL);
	if (deleg_slab == NULL)
		goto out_free_stateid_slab;
	return 0;

out_free_stateid_slab:
	kmem_cache_destroy(stateid_slab);
out_free_file_slab:
	kmem_cache_destroy(file_slab);
out_free_lockowner_slab:
	kmem_cache_destroy(lockowner_slab);
out_free_openowner_slab:
	kmem_cache_destroy(openowner_slab);
out:
	dprintk("nfsd4: out of memory while initializing nfsv4\n");
	return -ENOMEM;
}
static void init_nfs4_replay(struct nfs4_replay *rp)
{
	rp->rp_status = nfserr_serverfault;
	rp->rp_buflen = 0;
	rp->rp_buf = rp->rp_ibuf;
	mutex_init(&rp->rp_mutex);
}

static void nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate,
		struct nfs4_stateowner *so)
{
	if (!nfsd4_has_session(cstate)) {
		mutex_lock(&so->so_replay.rp_mutex);
		cstate->replay_owner = nfs4_get_stateowner(so);
	}
}

void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate)
{
	struct nfs4_stateowner *so = cstate->replay_owner;

	if (so != NULL) {
		cstate->replay_owner = NULL;
		mutex_unlock(&so->so_replay.rp_mutex);
		nfs4_put_stateowner(so);
	}
}
static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp)
{
	struct nfs4_stateowner *sop;

	sop = kmem_cache_alloc(slab, GFP_KERNEL);
	if (!sop)
		return NULL;

	sop->so_owner.data = kmemdup(owner->data, owner->len, GFP_KERNEL);
	if (!sop->so_owner.data) {
		kmem_cache_free(slab, sop);
		return NULL;
	}
	sop->so_owner.len = owner->len;

	INIT_LIST_HEAD(&sop->so_stateids);
	sop->so_client = clp;
	init_nfs4_replay(&sop->so_replay);
	atomic_set(&sop->so_count, 1);
	return sop;
}

static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval)
{
	lockdep_assert_held(&clp->cl_lock);

	list_add(&oo->oo_owner.so_strhash,
		 &clp->cl_ownerstr_hashtbl[strhashval]);
	list_add(&oo->oo_perclient, &clp->cl_openowners);
}
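/*
 * Open owners are reference counted stateowners; the so_ops table below
 * provides the unhash and free callbacks used by the generic stateowner
 * code when the last reference is dropped.
 */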
static void nfs4_unhash_openowner(struct nfs4_stateowner *so)
{
	unhash_openowner_locked(openowner(so));
}

static void nfs4_free_openowner(struct nfs4_stateowner *so)
{
	struct nfs4_openowner *oo = openowner(so);

	kmem_cache_free(openowner_slab, oo);
}

static const struct nfs4_stateowner_operations openowner_ops = {
	.so_unhash =	nfs4_unhash_openowner,
	.so_free =	nfs4_free_openowner,
};

static struct nfs4_openowner *
alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
			   struct nfsd4_compound_state *cstate)
{
	struct nfs4_client *clp = cstate->clp;
	struct nfs4_openowner *oo, *ret;

	oo = alloc_stateowner(openowner_slab, &open->op_owner, clp);
	if (!oo)
		return NULL;
	oo->oo_owner.so_ops = &openowner_ops;
	oo->oo_owner.so_is_open_owner = 1;
	oo->oo_owner.so_seqid = open->op_seqid;
	oo->oo_flags = 0;
	if (nfsd4_has_session(cstate))
		oo->oo_flags |= NFS4_OO_CONFIRMED;
	oo->oo_time = 0;
	oo->oo_last_closed_stid = NULL;
	INIT_LIST_HEAD(&oo->oo_close_lru);
	spin_lock(&clp->cl_lock);
	ret = find_openstateowner_str_locked(strhashval, open, clp);
	if (ret == NULL) {
		hash_openowner(oo, clp, strhashval);
		ret = oo;
	} else
		nfs4_free_openowner(&oo->oo_owner);
	spin_unlock(&clp->cl_lock);
	return ret;
}
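/*
 * Initialize a freshly allocated open stateid and hash it on both the
 * owner's and the file's stateid lists, taking a file reference.
 */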
static void init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp,
		struct nfsd4_open *open) {
	struct nfs4_openowner *oo = open->op_openowner;

	atomic_inc(&stp->st_stid.sc_count);
	stp->st_stid.sc_type = NFS4_OPEN_STID;
	INIT_LIST_HEAD(&stp->st_locks);
	stp->st_stateowner = nfs4_get_stateowner(&oo->oo_owner);
	get_nfs4_file(fp);
	stp->st_stid.sc_file = fp;
	stp->st_access_bmap = 0;
	stp->st_deny_bmap = 0;
	stp->st_openstp = NULL;
	init_rwsem(&stp->st_rwsem);
	spin_lock(&oo->oo_owner.so_client->cl_lock);
	list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
	spin_lock(&fp->fi_lock);
	list_add(&stp->st_perfile, &fp->fi_stateids);
	spin_unlock(&fp->fi_lock);
	spin_unlock(&oo->oo_owner.so_client->cl_lock);
}
/*
 * In the 4.0 case we need to keep the owners around a little while to handle
 * CLOSE replay. We still do need to release any file access that is held by
 * them before returning however.
 */
static void
move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net)
{
	struct nfs4_ol_stateid *last;
	struct nfs4_openowner *oo = openowner(s->st_stateowner);
	struct nfsd_net *nn = net_generic(s->st_stid.sc_client->net,
						nfsd_net_id);

	dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo);

	/*
	 * We know that we hold one reference via nfsd4_close, and another
	 * "persistent" reference for the client. If the refcount is higher
	 * than 2, then there are still calls in progress that are using this
	 * stateid. We can't put the sc_file reference until they are finished.
	 * Wait for the refcount to drop to 2. Since it has been unhashed,
	 * there should be no danger of the refcount going back up again at
	 * this point.
	 */
	wait_event(close_wq, atomic_read(&s->st_stid.sc_count) == 2);

	release_all_access(s);
	if (s->st_stid.sc_file) {
		put_nfs4_file(s->st_stid.sc_file);
		s->st_stid.sc_file = NULL;
	}

	spin_lock(&nn->client_lock);
	last = oo->oo_last_closed_stid;
	oo->oo_last_closed_stid = s;
	list_move_tail(&oo->oo_close_lru, &nn->close_lru);
	oo->oo_time = get_seconds();
	spin_unlock(&nn->client_lock);
	if (last)
		nfs4_put_stid(&last->st_stid);
}
/* search file_hashtbl[] for file */
static struct nfs4_file *
find_file_locked(struct knfsd_fh *fh)
{
	unsigned int hashval = file_hashval(fh);
	struct nfs4_file *fp;

	lockdep_assert_held(&state_lock);

	hlist_for_each_entry(fp, &file_hashtbl[hashval], fi_hash) {
		if (nfsd_fh_match(&fp->fi_fhandle, fh)) {
			get_nfs4_file(fp);
			return fp;
		}
	}
	return NULL;
}

static struct nfs4_file *
find_file(struct knfsd_fh *fh)
{
	struct nfs4_file *fp;

	spin_lock(&state_lock);
	fp = find_file_locked(fh);
	spin_unlock(&state_lock);
	return fp;
}

static struct nfs4_file *
find_or_add_file(struct nfs4_file *new, struct knfsd_fh *fh)
{
	struct nfs4_file *fp;

	spin_lock(&state_lock);
	fp = find_file_locked(fh);
	if (fp == NULL) {
		nfsd4_init_file(new, fh);
		fp = new;
	}
	spin_unlock(&state_lock);

	return fp;
}

/*
 * Called to check deny when READ with all zero stateid or
 * WRITE with all zero or all one stateid
 */
static __be32
nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
{
	struct nfs4_file *fp;
	__be32 ret = nfs_ok;

	fp = find_file(&current_fh->fh_handle);
	if (!fp)
		return ret;
	/* Check for conflicting share reservations */
	spin_lock(&fp->fi_lock);
	if (fp->fi_share_deny & deny_type)
		ret = nfserr_locked;
	spin_unlock(&fp->fi_lock);
	put_nfs4_file(fp);
	return ret;
}
3366 static void nfsd4_cb_recall_prepare(struct nfsd4_callback
*cb
)
3368 struct nfs4_delegation
*dp
= cb_to_delegation(cb
);
3369 struct nfsd_net
*nn
= net_generic(dp
->dl_stid
.sc_client
->net
,
3372 block_delegations(&dp
->dl_stid
.sc_file
->fi_fhandle
);
3375 * We can't do this in nfsd_break_deleg_cb because it is
3376 * already holding inode->i_lock.
3378 * If the dl_time != 0, then we know that it has already been
3379 * queued for a lease break. Don't queue it again.
3381 spin_lock(&state_lock
);
3382 if (dp
->dl_time
== 0) {
3383 dp
->dl_time
= get_seconds();
3384 list_add_tail(&dp
->dl_recall_lru
, &nn
->del_recall_lru
);
3386 spin_unlock(&state_lock
);
3389 static int nfsd4_cb_recall_done(struct nfsd4_callback
*cb
,
3390 struct rpc_task
*task
)
3392 struct nfs4_delegation
*dp
= cb_to_delegation(cb
);
3394 switch (task
->tk_status
) {
3398 case -NFS4ERR_BAD_STATEID
:
3400 * Race: client probably got cb_recall before open reply
3401 * granting delegation.
3403 if (dp
->dl_retries
--) {
3404 rpc_delay(task
, 2 * HZ
);
3413 static void nfsd4_cb_recall_release(struct nfsd4_callback
*cb
)
3415 struct nfs4_delegation
*dp
= cb_to_delegation(cb
);
3417 nfs4_put_stid(&dp
->dl_stid
);
3420 static struct nfsd4_callback_ops nfsd4_cb_recall_ops
= {
3421 .prepare
= nfsd4_cb_recall_prepare
,
3422 .done
= nfsd4_cb_recall_done
,
3423 .release
= nfsd4_cb_recall_release
,
3426 static void nfsd_break_one_deleg(struct nfs4_delegation
*dp
)
3429 * We're assuming the state code never drops its reference
3430 * without first removing the lease. Since we're in this lease
3431 * callback (and since the lease code is serialized by the kernel
3432 * lock) we know the server hasn't removed the lease yet, we know
3433 * it's safe to take a reference.
3435 atomic_inc(&dp
->dl_stid
.sc_count
);
3436 nfsd4_run_cb(&dp
->dl_recall
);
3439 /* Called from break_lease() with i_lock held. */
3441 nfsd_break_deleg_cb(struct file_lock
*fl
)
3444 struct nfs4_file
*fp
= (struct nfs4_file
*)fl
->fl_owner
;
3445 struct nfs4_delegation
*dp
;
3448 WARN(1, "(%p)->fl_owner NULL\n", fl
);
3451 if (fp
->fi_had_conflict
) {
3452 WARN(1, "duplicate break on %p\n", fp
);
3456 * We don't want the locks code to timeout the lease for us;
3457 * we'll remove it ourself if a delegation isn't returned
3460 fl
->fl_break_time
= 0;
3462 spin_lock(&fp
->fi_lock
);
3463 fp
->fi_had_conflict
= true;
3465 * If there are no delegations on the list, then return true
3466 * so that the lease code will go ahead and delete it.
3468 if (list_empty(&fp
->fi_delegations
))
3471 list_for_each_entry(dp
, &fp
->fi_delegations
, dl_perfile
)
3472 nfsd_break_one_deleg(dp
);
3473 spin_unlock(&fp
->fi_lock
);
3478 nfsd_change_deleg_cb(struct file_lock
**onlist
, int arg
, struct list_head
*dispose
)
3481 return lease_modify(onlist
, arg
, dispose
);
3486 static const struct lock_manager_operations nfsd_lease_mng_ops
= {
3487 .lm_break
= nfsd_break_deleg_cb
,
3488 .lm_change
= nfsd_change_deleg_cb
,
static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid)
{
	if (nfsd4_has_session(cstate))
		return nfs_ok;
	if (seqid == so->so_seqid - 1)
		return nfserr_replay_me;
	if (seqid == so->so_seqid)
		return nfs_ok;
	return nfserr_bad_seqid;
}

static __be32 lookup_clientid(clientid_t *clid,
		struct nfsd4_compound_state *cstate,
		struct nfsd_net *nn)
{
	struct nfs4_client *found;

	if (cstate->clp) {
		found = cstate->clp;
		if (!same_clid(&found->cl_clientid, clid))
			return nfserr_stale_clientid;
		return nfs_ok;
	}

	if (STALE_CLIENTID(clid, nn))
		return nfserr_stale_clientid;

	/*
	 * For v4.1+ we get the client in the SEQUENCE op. If we don't have one
	 * cached already then we know this is for v4.0 and "sessions"
	 * will be false.
	 */
	WARN_ON_ONCE(cstate->session);
	spin_lock(&nn->client_lock);
	found = find_confirmed_client(clid, false, nn);
	if (!found) {
		spin_unlock(&nn->client_lock);
		return nfserr_expired;
	}
	atomic_inc(&found->cl_refcount);
	spin_unlock(&nn->client_lock);

	/* Cache the nfs4_client in cstate! */
	cstate->clp = found;
	return nfs_ok;
}
3539 nfsd4_process_open1(struct nfsd4_compound_state
*cstate
,
3540 struct nfsd4_open
*open
, struct nfsd_net
*nn
)
3542 clientid_t
*clientid
= &open
->op_clientid
;
3543 struct nfs4_client
*clp
= NULL
;
3544 unsigned int strhashval
;
3545 struct nfs4_openowner
*oo
= NULL
;
3548 if (STALE_CLIENTID(&open
->op_clientid
, nn
))
3549 return nfserr_stale_clientid
;
3551 * In case we need it later, after we've already created the
3552 * file and don't want to risk a further failure:
3554 open
->op_file
= nfsd4_alloc_file();
3555 if (open
->op_file
== NULL
)
3556 return nfserr_jukebox
;
3558 status
= lookup_clientid(clientid
, cstate
, nn
);
3563 strhashval
= ownerstr_hashval(&open
->op_owner
);
3564 oo
= find_openstateowner_str(strhashval
, open
, clp
);
3565 open
->op_openowner
= oo
;
3569 if (!(oo
->oo_flags
& NFS4_OO_CONFIRMED
)) {
3570 /* Replace unconfirmed owners without checking for replay. */
3571 release_openowner(oo
);
3572 open
->op_openowner
= NULL
;
3575 status
= nfsd4_check_seqid(cstate
, &oo
->oo_owner
, open
->op_seqid
);
3580 oo
= alloc_init_open_stateowner(strhashval
, open
, cstate
);
3582 return nfserr_jukebox
;
3583 open
->op_openowner
= oo
;
3585 open
->op_stp
= nfs4_alloc_open_stateid(clp
);
3587 return nfserr_jukebox
;
static inline __be32
nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
{
	if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ))
		return nfserr_openmode;
	else
		return nfs_ok;
}

static int share_access_to_flags(u32 share_access)
{
	return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE;
}

static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, stateid_t *s)
{
	struct nfs4_stid *ret;

	ret = find_stateid_by_type(cl, s,
				NFS4_DELEG_STID|NFS4_REVOKED_DELEG_STID);
	if (!ret)
		return NULL;
	return delegstateid(ret);
}

static bool nfsd4_is_deleg_cur(struct nfsd4_open *open)
{
	return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR ||
	       open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH;
}
3623 nfs4_check_deleg(struct nfs4_client
*cl
, struct nfsd4_open
*open
,
3624 struct nfs4_delegation
**dp
)
3627 __be32 status
= nfserr_bad_stateid
;
3628 struct nfs4_delegation
*deleg
;
3630 deleg
= find_deleg_stateid(cl
, &open
->op_delegate_stateid
);
3633 if (deleg
->dl_stid
.sc_type
== NFS4_REVOKED_DELEG_STID
) {
3634 nfs4_put_stid(&deleg
->dl_stid
);
3635 if (cl
->cl_minorversion
)
3636 status
= nfserr_deleg_revoked
;
3639 flags
= share_access_to_flags(open
->op_share_access
);
3640 status
= nfs4_check_delegmode(deleg
, flags
);
3642 nfs4_put_stid(&deleg
->dl_stid
);
3647 if (!nfsd4_is_deleg_cur(open
))
3651 open
->op_openowner
->oo_flags
|= NFS4_OO_CONFIRMED
;
3655 static struct nfs4_ol_stateid
*
3656 nfsd4_find_existing_open(struct nfs4_file
*fp
, struct nfsd4_open
*open
)
3658 struct nfs4_ol_stateid
*local
, *ret
= NULL
;
3659 struct nfs4_openowner
*oo
= open
->op_openowner
;
3661 spin_lock(&fp
->fi_lock
);
3662 list_for_each_entry(local
, &fp
->fi_stateids
, st_perfile
) {
3663 /* ignore lock owners */
3664 if (local
->st_stateowner
->so_is_open_owner
== 0)
3666 if (local
->st_stateowner
== &oo
->oo_owner
) {
3668 atomic_inc(&ret
->st_stid
.sc_count
);
3672 spin_unlock(&fp
->fi_lock
);
static inline int nfs4_access_to_access(u32 nfs4_access)
{
	int flags = 0;

	if (nfs4_access & NFS4_SHARE_ACCESS_READ)
		flags |= NFSD_MAY_READ;
	if (nfs4_access & NFS4_SHARE_ACCESS_WRITE)
		flags |= NFSD_MAY_WRITE;
	return flags;
}

static inline __be32
nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
		struct nfsd4_open *open)
{
	struct iattr iattr = {
		.ia_valid = ATTR_SIZE,
		.ia_size = 0,
	};
	if (!open->op_truncate)
		return 0;
	if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
		return nfserr_inval;
	return nfsd_setattr(rqstp, fh, &iattr, 0, (time_t)0);
}
3702 static __be32
nfs4_get_vfs_file(struct svc_rqst
*rqstp
, struct nfs4_file
*fp
,
3703 struct svc_fh
*cur_fh
, struct nfs4_ol_stateid
*stp
,
3704 struct nfsd4_open
*open
)
3706 struct file
*filp
= NULL
;
3708 int oflag
= nfs4_access_to_omode(open
->op_share_access
);
3709 int access
= nfs4_access_to_access(open
->op_share_access
);
3710 unsigned char old_access_bmap
, old_deny_bmap
;
3712 spin_lock(&fp
->fi_lock
);
3715 * Are we trying to set a deny mode that would conflict with
3718 status
= nfs4_file_check_deny(fp
, open
->op_share_deny
);
3719 if (status
!= nfs_ok
) {
3720 spin_unlock(&fp
->fi_lock
);
3724 /* set access to the file */
3725 status
= nfs4_file_get_access(fp
, open
->op_share_access
);
3726 if (status
!= nfs_ok
) {
3727 spin_unlock(&fp
->fi_lock
);
3731 /* Set access bits in stateid */
3732 old_access_bmap
= stp
->st_access_bmap
;
3733 set_access(open
->op_share_access
, stp
);
3735 /* Set new deny mask */
3736 old_deny_bmap
= stp
->st_deny_bmap
;
3737 set_deny(open
->op_share_deny
, stp
);
3738 fp
->fi_share_deny
|= (open
->op_share_deny
& NFS4_SHARE_DENY_BOTH
);
3740 if (!fp
->fi_fds
[oflag
]) {
3741 spin_unlock(&fp
->fi_lock
);
3742 status
= nfsd_open(rqstp
, cur_fh
, S_IFREG
, access
, &filp
);
3744 goto out_put_access
;
3745 spin_lock(&fp
->fi_lock
);
3746 if (!fp
->fi_fds
[oflag
]) {
3747 fp
->fi_fds
[oflag
] = filp
;
3751 spin_unlock(&fp
->fi_lock
);
3755 status
= nfsd4_truncate(rqstp
, cur_fh
, open
);
3757 goto out_put_access
;
3761 stp
->st_access_bmap
= old_access_bmap
;
3762 nfs4_file_put_access(fp
, open
->op_share_access
);
3763 reset_union_bmap_deny(bmap_to_share_mode(old_deny_bmap
), stp
);
3768 nfs4_upgrade_open(struct svc_rqst
*rqstp
, struct nfs4_file
*fp
, struct svc_fh
*cur_fh
, struct nfs4_ol_stateid
*stp
, struct nfsd4_open
*open
)
3771 unsigned char old_deny_bmap
= stp
->st_deny_bmap
;
3773 if (!test_access(open
->op_share_access
, stp
))
3774 return nfs4_get_vfs_file(rqstp
, fp
, cur_fh
, stp
, open
);
3776 /* test and set deny mode */
3777 spin_lock(&fp
->fi_lock
);
3778 status
= nfs4_file_check_deny(fp
, open
->op_share_deny
);
3779 if (status
== nfs_ok
) {
3780 set_deny(open
->op_share_deny
, stp
);
3781 fp
->fi_share_deny
|=
3782 (open
->op_share_deny
& NFS4_SHARE_DENY_BOTH
);
3784 spin_unlock(&fp
->fi_lock
);
3786 if (status
!= nfs_ok
)
3789 status
= nfsd4_truncate(rqstp
, cur_fh
, open
);
3790 if (status
!= nfs_ok
)
3791 reset_union_bmap_deny(old_deny_bmap
, stp
);
static void
nfs4_set_claim_prev(struct nfsd4_open *open, bool has_session)
{
	open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
}

/* Should we give out recallable state?: */
static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
{
	if (clp->cl_cb_state == NFSD4_CB_UP)
		return true;
	/*
	 * In the sessions case, since we don't have to establish a
	 * separate connection for callbacks, we assume it's OK
	 * until we hear otherwise:
	 */
	return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
}
3814 static struct file_lock
*nfs4_alloc_init_lease(struct nfs4_file
*fp
, int flag
)
3816 struct file_lock
*fl
;
3818 fl
= locks_alloc_lock();
3821 fl
->fl_lmops
= &nfsd_lease_mng_ops
;
3822 fl
->fl_flags
= FL_DELEG
;
3823 fl
->fl_type
= flag
== NFS4_OPEN_DELEGATE_READ
? F_RDLCK
: F_WRLCK
;
3824 fl
->fl_end
= OFFSET_MAX
;
3825 fl
->fl_owner
= (fl_owner_t
)fp
;
3826 fl
->fl_pid
= current
->tgid
;
3830 static int nfs4_setlease(struct nfs4_delegation
*dp
)
3832 struct nfs4_file
*fp
= dp
->dl_stid
.sc_file
;
3833 struct file_lock
*fl
, *ret
;
3837 fl
= nfs4_alloc_init_lease(fp
, NFS4_OPEN_DELEGATE_READ
);
3840 filp
= find_readable_file(fp
);
3842 /* We should always have a readable file here */
3848 status
= vfs_setlease(filp
, fl
->fl_type
, &fl
, NULL
);
3850 locks_free_lock(fl
);
3853 spin_lock(&state_lock
);
3854 spin_lock(&fp
->fi_lock
);
3855 /* Did the lease get broken before we took the lock? */
3857 if (fp
->fi_had_conflict
)
3860 if (fp
->fi_deleg_file
) {
3862 atomic_inc(&fp
->fi_delegees
);
3863 hash_delegation_locked(dp
, fp
);
3866 fp
->fi_deleg_file
= filp
;
3867 atomic_set(&fp
->fi_delegees
, 1);
3868 hash_delegation_locked(dp
, fp
);
3869 spin_unlock(&fp
->fi_lock
);
3870 spin_unlock(&state_lock
);
3873 spin_unlock(&fp
->fi_lock
);
3874 spin_unlock(&state_lock
);
3880 static struct nfs4_delegation
*
3881 nfs4_set_delegation(struct nfs4_client
*clp
, struct svc_fh
*fh
,
3882 struct nfs4_file
*fp
)
3885 struct nfs4_delegation
*dp
;
3887 if (fp
->fi_had_conflict
)
3888 return ERR_PTR(-EAGAIN
);
3890 dp
= alloc_init_deleg(clp
, fh
);
3892 return ERR_PTR(-ENOMEM
);
3895 spin_lock(&state_lock
);
3896 spin_lock(&fp
->fi_lock
);
3897 dp
->dl_stid
.sc_file
= fp
;
3898 if (!fp
->fi_deleg_file
) {
3899 spin_unlock(&fp
->fi_lock
);
3900 spin_unlock(&state_lock
);
3901 status
= nfs4_setlease(dp
);
3904 if (fp
->fi_had_conflict
) {
3908 atomic_inc(&fp
->fi_delegees
);
3909 hash_delegation_locked(dp
, fp
);
3912 spin_unlock(&fp
->fi_lock
);
3913 spin_unlock(&state_lock
);
3916 nfs4_put_stid(&dp
->dl_stid
);
3917 return ERR_PTR(status
);
static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
{
	open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
	if (status == -EAGAIN)
		open->op_why_no_deleg = WND4_CONTENTION;
	else {
		open->op_why_no_deleg = WND4_RESOURCE;
		switch (open->op_deleg_want) {
		case NFS4_SHARE_WANT_READ_DELEG:
		case NFS4_SHARE_WANT_WRITE_DELEG:
		case NFS4_SHARE_WANT_ANY_DELEG:
			break;
		case NFS4_SHARE_WANT_CANCEL:
			open->op_why_no_deleg = WND4_CANCELLED;
			break;
		case NFS4_SHARE_WANT_NO_DELEG:
			WARN_ON_ONCE(1);
		}
	}
}
3944 * Attempt to hand out a delegation.
3946 * Note we don't support write delegations, and won't until the vfs has
3947 * proper support for them.
3950 nfs4_open_delegation(struct svc_fh
*fh
, struct nfsd4_open
*open
,
3951 struct nfs4_ol_stateid
*stp
)
3953 struct nfs4_delegation
*dp
;
3954 struct nfs4_openowner
*oo
= openowner(stp
->st_stateowner
);
3955 struct nfs4_client
*clp
= stp
->st_stid
.sc_client
;
3959 cb_up
= nfsd4_cb_channel_good(oo
->oo_owner
.so_client
);
3960 open
->op_recall
= 0;
3961 switch (open
->op_claim_type
) {
3962 case NFS4_OPEN_CLAIM_PREVIOUS
:
3964 open
->op_recall
= 1;
3965 if (open
->op_delegate_type
!= NFS4_OPEN_DELEGATE_READ
)
3968 case NFS4_OPEN_CLAIM_NULL
:
3969 case NFS4_OPEN_CLAIM_FH
:
3971 * Let's not give out any delegations till everyone's
3972 * had the chance to reclaim theirs....
3974 if (locks_in_grace(clp
->net
))
3976 if (!cb_up
|| !(oo
->oo_flags
& NFS4_OO_CONFIRMED
))
3979 * Also, if the file was opened for write or
3980 * create, there's a good chance the client's
3981 * about to write to it, resulting in an
3982 * immediate recall (since we don't support
3983 * write delegations):
3985 if (open
->op_share_access
& NFS4_SHARE_ACCESS_WRITE
)
3987 if (open
->op_create
== NFS4_OPEN_CREATE
)
3993 dp
= nfs4_set_delegation(clp
, fh
, stp
->st_stid
.sc_file
);
3997 memcpy(&open
->op_delegate_stateid
, &dp
->dl_stid
.sc_stateid
, sizeof(dp
->dl_stid
.sc_stateid
));
3999 dprintk("NFSD: delegation stateid=" STATEID_FMT
"\n",
4000 STATEID_VAL(&dp
->dl_stid
.sc_stateid
));
4001 open
->op_delegate_type
= NFS4_OPEN_DELEGATE_READ
;
4002 nfs4_put_stid(&dp
->dl_stid
);
4005 open
->op_delegate_type
= NFS4_OPEN_DELEGATE_NONE
;
4006 if (open
->op_claim_type
== NFS4_OPEN_CLAIM_PREVIOUS
&&
4007 open
->op_delegate_type
!= NFS4_OPEN_DELEGATE_NONE
) {
4008 dprintk("NFSD: WARNING: refusing delegation reclaim\n");
4009 open
->op_recall
= 1;
4012 /* 4.1 client asking for a delegation? */
4013 if (open
->op_deleg_want
)
4014 nfsd4_open_deleg_none_ext(open
, status
);
static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open,
					struct nfs4_delegation *dp)
{
	if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG &&
	    dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
		open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
		open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE;
	} else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG &&
		   dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
		open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
		open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE;
	}
	/* Otherwise the client must be confused wanting a delegation
	 * it already has, therefore we don't return
	 * NFS4_OPEN_DELEGATE_NONE_EXT and reason.
	 */
}
4037 nfsd4_process_open2(struct svc_rqst
*rqstp
, struct svc_fh
*current_fh
, struct nfsd4_open
*open
)
4039 struct nfsd4_compoundres
*resp
= rqstp
->rq_resp
;
4040 struct nfs4_client
*cl
= open
->op_openowner
->oo_owner
.so_client
;
4041 struct nfs4_file
*fp
= NULL
;
4042 struct nfs4_ol_stateid
*stp
= NULL
;
4043 struct nfs4_delegation
*dp
= NULL
;
4047 * Lookup file; if found, lookup stateid and check open request,
4048 * and check for delegations in the process of being recalled.
4049 * If not found, create the nfs4_file struct
4051 fp
= find_or_add_file(open
->op_file
, ¤t_fh
->fh_handle
);
4052 if (fp
!= open
->op_file
) {
4053 status
= nfs4_check_deleg(cl
, open
, &dp
);
4056 stp
= nfsd4_find_existing_open(fp
, open
);
4058 open
->op_file
= NULL
;
4059 status
= nfserr_bad_stateid
;
4060 if (nfsd4_is_deleg_cur(open
))
4062 status
= nfserr_jukebox
;
4066 * OPEN the file, or upgrade an existing OPEN.
4067 * If truncate fails, the OPEN fails.
4070 /* Stateid was found, this is an OPEN upgrade */
4071 down_read(&stp
->st_rwsem
);
4072 status
= nfs4_upgrade_open(rqstp
, fp
, current_fh
, stp
, open
);
4074 up_read(&stp
->st_rwsem
);
4079 open
->op_stp
= NULL
;
4080 init_open_stateid(stp
, fp
, open
);
4081 down_read(&stp
->st_rwsem
);
4082 status
= nfs4_get_vfs_file(rqstp
, fp
, current_fh
, stp
, open
);
4084 up_read(&stp
->st_rwsem
);
4085 release_open_stateid(stp
);
4089 update_stateid(&stp
->st_stid
.sc_stateid
);
4090 memcpy(&open
->op_stateid
, &stp
->st_stid
.sc_stateid
, sizeof(stateid_t
));
4091 up_read(&stp
->st_rwsem
);
4093 if (nfsd4_has_session(&resp
->cstate
)) {
4094 if (open
->op_deleg_want
& NFS4_SHARE_WANT_NO_DELEG
) {
4095 open
->op_delegate_type
= NFS4_OPEN_DELEGATE_NONE_EXT
;
4096 open
->op_why_no_deleg
= WND4_NOT_WANTED
;
4102 * Attempt to hand out a delegation. No error return, because the
4103 * OPEN succeeds even if we fail.
4105 nfs4_open_delegation(current_fh
, open
, stp
);
4109 dprintk("%s: stateid=" STATEID_FMT
"\n", __func__
,
4110 STATEID_VAL(&stp
->st_stid
.sc_stateid
));
4112 /* 4.1 client trying to upgrade/downgrade delegation? */
4113 if (open
->op_delegate_type
== NFS4_OPEN_DELEGATE_NONE
&& dp
&&
4114 open
->op_deleg_want
)
4115 nfsd4_deleg_xgrade_none_ext(open
, dp
);
4119 if (status
== 0 && open
->op_claim_type
== NFS4_OPEN_CLAIM_PREVIOUS
)
4120 nfs4_set_claim_prev(open
, nfsd4_has_session(&resp
->cstate
));
4122 * To finish the open response, we just need to set the rflags.
4124 open
->op_rflags
= NFS4_OPEN_RESULT_LOCKTYPE_POSIX
;
4125 if (!(open
->op_openowner
->oo_flags
& NFS4_OO_CONFIRMED
) &&
4126 !nfsd4_has_session(&resp
->cstate
))
4127 open
->op_rflags
|= NFS4_OPEN_RESULT_CONFIRM
;
4129 nfs4_put_stid(&dp
->dl_stid
);
4131 nfs4_put_stid(&stp
->st_stid
);
4136 void nfsd4_cleanup_open_state(struct nfsd4_compound_state
*cstate
,
4137 struct nfsd4_open
*open
, __be32 status
)
4139 if (open
->op_openowner
) {
4140 struct nfs4_stateowner
*so
= &open
->op_openowner
->oo_owner
;
4142 nfsd4_cstate_assign_replay(cstate
, so
);
4143 nfs4_put_stateowner(so
);
4146 nfsd4_free_file(open
->op_file
);
4148 nfs4_put_stid(&open
->op_stp
->st_stid
);
4152 nfsd4_renew(struct svc_rqst
*rqstp
, struct nfsd4_compound_state
*cstate
,
4155 struct nfs4_client
*clp
;
4157 struct nfsd_net
*nn
= net_generic(SVC_NET(rqstp
), nfsd_net_id
);
4159 dprintk("process_renew(%08x/%08x): starting\n",
4160 clid
->cl_boot
, clid
->cl_id
);
4161 status
= lookup_clientid(clid
, cstate
, nn
);
4165 status
= nfserr_cb_path_down
;
4166 if (!list_empty(&clp
->cl_delegations
)
4167 && clp
->cl_cb_state
!= NFSD4_CB_UP
)
static void
nfsd4_end_grace(struct nfsd_net *nn)
{
	/* do nothing if grace period already ended */
	if (nn->grace_ended)
		return;

	dprintk("NFSD: end of grace period\n");
	nn->grace_ended = true;
	/*
	 * If the server goes down again right now, an NFSv4
	 * client will still be allowed to reclaim after it comes back up,
	 * even if it hasn't yet had a chance to reclaim state this time.
	 */
	nfsd4_record_grace_done(nn);
	/*
	 * At this point, NFSv4 clients can still reclaim.  But if the
	 * server crashes, any that have not yet reclaimed will be out
	 * of luck on the next boot.
	 *
	 * (NFSv4.1+ clients are considered to have reclaimed once they
	 * call RECLAIM_COMPLETE.  NFSv4.0 clients are considered to
	 * have reclaimed after their first OPEN.)
	 */
	locks_end_grace(&nn->nfsd4_manager);
	/*
	 * At this point, and once lockd and/or any other containers
	 * exit their grace period, further reclaims will fail and
	 * regular locking can resume.
	 */
}
4208 nfs4_laundromat(struct nfsd_net
*nn
)
4210 struct nfs4_client
*clp
;
4211 struct nfs4_openowner
*oo
;
4212 struct nfs4_delegation
*dp
;
4213 struct nfs4_ol_stateid
*stp
;
4214 struct list_head
*pos
, *next
, reaplist
;
4215 time_t cutoff
= get_seconds() - nn
->nfsd4_lease
;
4216 time_t t
, new_timeo
= nn
->nfsd4_lease
;
4218 dprintk("NFSD: laundromat service - starting\n");
4219 nfsd4_end_grace(nn
);
4220 INIT_LIST_HEAD(&reaplist
);
4221 spin_lock(&nn
->client_lock
);
4222 list_for_each_safe(pos
, next
, &nn
->client_lru
) {
4223 clp
= list_entry(pos
, struct nfs4_client
, cl_lru
);
4224 if (time_after((unsigned long)clp
->cl_time
, (unsigned long)cutoff
)) {
4225 t
= clp
->cl_time
- cutoff
;
4226 new_timeo
= min(new_timeo
, t
);
4229 if (mark_client_expired_locked(clp
)) {
4230 dprintk("NFSD: client in use (clientid %08x)\n",
4231 clp
->cl_clientid
.cl_id
);
4234 list_add(&clp
->cl_lru
, &reaplist
);
4236 spin_unlock(&nn
->client_lock
);
4237 list_for_each_safe(pos
, next
, &reaplist
) {
4238 clp
= list_entry(pos
, struct nfs4_client
, cl_lru
);
4239 dprintk("NFSD: purging unused client (clientid %08x)\n",
4240 clp
->cl_clientid
.cl_id
);
4241 list_del_init(&clp
->cl_lru
);
4244 spin_lock(&state_lock
);
4245 list_for_each_safe(pos
, next
, &nn
->del_recall_lru
) {
4246 dp
= list_entry (pos
, struct nfs4_delegation
, dl_recall_lru
);
4247 if (net_generic(dp
->dl_stid
.sc_client
->net
, nfsd_net_id
) != nn
)
4249 if (time_after((unsigned long)dp
->dl_time
, (unsigned long)cutoff
)) {
4250 t
= dp
->dl_time
- cutoff
;
4251 new_timeo
= min(new_timeo
, t
);
4254 unhash_delegation_locked(dp
);
4255 list_add(&dp
->dl_recall_lru
, &reaplist
);
4257 spin_unlock(&state_lock
);
4258 while (!list_empty(&reaplist
)) {
4259 dp
= list_first_entry(&reaplist
, struct nfs4_delegation
,
4261 list_del_init(&dp
->dl_recall_lru
);
4262 revoke_delegation(dp
);
4265 spin_lock(&nn
->client_lock
);
4266 while (!list_empty(&nn
->close_lru
)) {
4267 oo
= list_first_entry(&nn
->close_lru
, struct nfs4_openowner
,
4269 if (time_after((unsigned long)oo
->oo_time
,
4270 (unsigned long)cutoff
)) {
4271 t
= oo
->oo_time
- cutoff
;
4272 new_timeo
= min(new_timeo
, t
);
4275 list_del_init(&oo
->oo_close_lru
);
4276 stp
= oo
->oo_last_closed_stid
;
4277 oo
->oo_last_closed_stid
= NULL
;
4278 spin_unlock(&nn
->client_lock
);
4279 nfs4_put_stid(&stp
->st_stid
);
4280 spin_lock(&nn
->client_lock
);
4282 spin_unlock(&nn
->client_lock
);
4284 new_timeo
= max_t(time_t, new_timeo
, NFSD_LAUNDROMAT_MINTIMEOUT
);
4288 static struct workqueue_struct
*laundry_wq
;
4289 static void laundromat_main(struct work_struct
*);
4292 laundromat_main(struct work_struct
*laundry
)
4295 struct delayed_work
*dwork
= container_of(laundry
, struct delayed_work
,
4297 struct nfsd_net
*nn
= container_of(dwork
, struct nfsd_net
,
4300 t
= nfs4_laundromat(nn
);
4301 dprintk("NFSD: laundromat_main - sleeping for %ld seconds\n", t
);
4302 queue_delayed_work(laundry_wq
, &nn
->laundromat_work
, t
*HZ
);
static inline __be32
nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp)
{
	if (!nfsd_fh_match(&fhp->fh_handle, &stp->sc_file->fi_fhandle))
		return nfserr_bad_stateid;
	return nfs_ok;
}

static inline int
access_permit_read(struct nfs4_ol_stateid *stp)
{
	return test_access(NFS4_SHARE_ACCESS_READ, stp) ||
		test_access(NFS4_SHARE_ACCESS_BOTH, stp) ||
		test_access(NFS4_SHARE_ACCESS_WRITE, stp);
}

static inline int
access_permit_write(struct nfs4_ol_stateid *stp)
{
	return test_access(NFS4_SHARE_ACCESS_WRITE, stp) ||
		test_access(NFS4_SHARE_ACCESS_BOTH, stp);
}

static
__be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
{
	__be32 status = nfserr_openmode;

	/* For lock stateid's, we test the parent open, not the lock: */
	if (stp->st_openstp)
		stp = stp->st_openstp;
	if ((flags & WR_STATE) && !access_permit_write(stp))
		goto out;
	if ((flags & RD_STATE) && !access_permit_read(stp))
		goto out;
	status = nfs_ok;
out:
	return status;
}
4344 static inline __be32
4345 check_special_stateids(struct net
*net
, svc_fh
*current_fh
, stateid_t
*stateid
, int flags
)
4347 if (ONE_STATEID(stateid
) && (flags
& RD_STATE
))
4349 else if (locks_in_grace(net
)) {
4350 /* Answer in remaining cases depends on existence of
4351 * conflicting state; so we must wait out the grace period. */
4352 return nfserr_grace
;
4353 } else if (flags
& WR_STATE
)
4354 return nfs4_share_conflict(current_fh
,
4355 NFS4_SHARE_DENY_WRITE
);
4356 else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */
4357 return nfs4_share_conflict(current_fh
,
4358 NFS4_SHARE_DENY_READ
);
4362 * Allow READ/WRITE during grace period on recovered state only for files
4363 * that are not able to provide mandatory locking.
4366 grace_disallows_io(struct net
*net
, struct inode
*inode
)
4368 return locks_in_grace(net
) && mandatory_lock(inode
);
4371 /* Returns true iff a is later than b: */
4372 static bool stateid_generation_after(stateid_t
*a
, stateid_t
*b
)
4374 return (s32
)(a
->si_generation
- b
->si_generation
) > 0;
static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
{
	/*
	 * When sessions are used the stateid generation number is ignored
	 * when it is zero.
	 */
	if (has_session && in->si_generation == 0)
		return nfs_ok;

	if (in->si_generation == ref->si_generation)
		return nfs_ok;

	/* If the client sends us a stateid from the future, it's buggy: */
	if (stateid_generation_after(in, ref))
		return nfserr_bad_stateid;
	/*
	 * However, we could see a stateid from the past, even from a
	 * non-buggy client.  For example, if the client sends a lock
	 * while some IO is outstanding, the lock may bump si_generation
	 * while the IO is still in flight.  The client could avoid that
	 * situation by waiting for responses on all the IO requests,
	 * but better performance may result in retrying IO that
	 * receives an old_stateid error if requests are rarely
	 * reordered in flight:
	 */
	return nfserr_old_stateid;
}
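/*
 * An NFSv4.0 open owner may not use its stateids until the open has been
 * confirmed with OPEN_CONFIRM.
 */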
static __be32 nfsd4_check_openowner_confirmed(struct nfs4_ol_stateid *ols)
{
	if (ols->st_stateowner->so_is_open_owner &&
	    !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
		return nfserr_bad_stateid;
	return nfs_ok;
}
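/*
 * Validate one stateid on behalf of TEST_STATEID: look it up under cl_lock
 * and map its type and generation to the status the client should see.
 */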
static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
{
	struct nfs4_stid *s;
	__be32 status = nfserr_bad_stateid;

	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
		CLOSE_STATEID(stateid))
		return status;
	/* Client debugging aid. */
	if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) {
		char addr_str[INET6_ADDRSTRLEN];
		rpc_ntop((struct sockaddr *)&cl->cl_addr, addr_str,
				 sizeof(addr_str));
		pr_warn_ratelimited("NFSD: client %s testing state ID "
					"with incorrect client ID\n", addr_str);
		return status;
	}
	spin_lock(&cl->cl_lock);
	s = find_stateid_locked(cl, stateid);
	if (!s)
		goto out_unlock;
	status = check_stateid_generation(stateid, &s->sc_stateid, 1);
	if (status)
		goto out_unlock;
	switch (s->sc_type) {
	case NFS4_DELEG_STID:
		status = nfs_ok;
		break;
	case NFS4_REVOKED_DELEG_STID:
		status = nfserr_deleg_revoked;
		break;
	case NFS4_OPEN_STID:
	case NFS4_LOCK_STID:
		status = nfsd4_check_openowner_confirmed(openlockstateid(s));
		break;
	default:
		printk("unknown stateid type %x\n", s->sc_type);
		/* Fallthrough */
	case NFS4_CLOSED_STID:
	case NFS4_CLOSED_DELEG_STID:
		status = nfserr_bad_stateid;
	}
out_unlock:
	spin_unlock(&cl->cl_lock);
	return status;
}
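/*
 * Look up a stateid of one of the types in @typemask for the client named
 * in the stateid.  Revoked delegations are reported as revoked (4.1+) or
 * bad unless the caller asked for them explicitly.
 */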
static __be32
nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
		     stateid_t *stateid, unsigned char typemask,
		     struct nfs4_stid **s, struct nfsd_net *nn)
{
	__be32 status;
	bool return_revoked = false;

	/*
	 *  only return revoked delegations if explicitly asked.
	 *  otherwise we report revoked or bad_stateid status.
	 */
	if (typemask & NFS4_REVOKED_DELEG_STID)
		return_revoked = true;
	else if (typemask & NFS4_DELEG_STID)
		typemask |= NFS4_REVOKED_DELEG_STID;

	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
		CLOSE_STATEID(stateid))
		return nfserr_bad_stateid;
	status = lookup_clientid(&stateid->si_opaque.so_clid, cstate, nn);
	if (status == nfserr_stale_clientid) {
		if (cstate->session)
			return nfserr_bad_stateid;
		return nfserr_stale_stateid;
	}
	if (status)
		return status;
	*s = find_stateid_by_type(cstate->clp, stateid, typemask);
	if (!*s)
		return nfserr_bad_stateid;
	if (((*s)->sc_type == NFS4_REVOKED_DELEG_STID) && !return_revoked) {
		nfs4_put_stid(*s);
		if (cstate->minorversion)
			return nfserr_deleg_revoked;
		return nfserr_bad_stateid;
	}
	return nfs_ok;
}
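/* Pick a struct file to do IO against, based on the stateid type. */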
static struct file *
nfs4_find_file(struct nfs4_stid *s, int flags)
{
	switch (s->sc_type) {
	case NFS4_DELEG_STID:
		if (WARN_ON_ONCE(!s->sc_file->fi_deleg_file))
			return NULL;
		return get_file(s->sc_file->fi_deleg_file);
	case NFS4_OPEN_STID:
	case NFS4_LOCK_STID:
		if (flags & RD_STATE)
			return find_readable_file(s->sc_file);
		else
			return find_writeable_file(s->sc_file);
		break;
	}

	return NULL;
}
static __be32
nfs4_check_olstateid(struct svc_fh *fhp, struct nfs4_ol_stateid *ols, int flags)
{
	__be32 status;

	status = nfsd4_check_openowner_confirmed(ols);
	if (status)
		return status;
	return nfs4_check_openmode(ols, flags);
}
/*
 * Checks for stateid operations
 */
__be32
nfs4_preprocess_stateid_op(struct net *net, struct nfsd4_compound_state *cstate,
			   stateid_t *stateid, int flags, struct file **filpp)
{
	struct svc_fh *fhp = &cstate->current_fh;
	struct inode *ino = fhp->fh_dentry->d_inode;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	struct nfs4_stid *s;
	__be32 status;

	if (filpp)
		*filpp = NULL;

	if (grace_disallows_io(net, ino))
		return nfserr_grace;

	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
		return check_special_stateids(net, fhp, stateid, flags);

	status = nfsd4_lookup_stateid(cstate, stateid,
				NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID,
				&s, nn);
	if (status)
		return status;
	status = check_stateid_generation(stateid, &s->sc_stateid,
			nfsd4_has_session(cstate));
	if (status)
		goto out;

	switch (s->sc_type) {
	case NFS4_DELEG_STID:
		status = nfs4_check_delegmode(delegstateid(s), flags);
		break;
	case NFS4_OPEN_STID:
	case NFS4_LOCK_STID:
		status = nfs4_check_olstateid(fhp, openlockstateid(s), flags);
		break;
	default:
		status = nfserr_bad_stateid;
		break;
	}
	if (status)
		goto out;
	status = nfs4_check_fh(fhp, s);

	if (!status && filpp) {
		*filpp = nfs4_find_file(s, flags);
		if (!*filpp)
			status = nfserr_serverfault;
	}
out:
	nfs4_put_stid(s);
	return status;
}
/*
 * Test if the stateid is valid
 */
__be32
nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		   struct nfsd4_test_stateid *test_stateid)
{
	struct nfsd4_test_stateid_id *stateid;
	struct nfs4_client *cl = cstate->session->se_client;

	list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list)
		stateid->ts_id_status =
			nfsd4_validate_stateid(cl, &stateid->ts_id_stateid);

	return nfs_ok;
}
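/*
 * FREE_STATEID: only revoked delegations and lock stateids that no longer
 * have locks outstanding may actually be freed; anything else reports
 * nfserr_locks_held or nfserr_bad_stateid.
 */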
__be32
nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		   struct nfsd4_free_stateid *free_stateid)
{
	stateid_t *stateid = &free_stateid->fr_stateid;
	struct nfs4_stid *s;
	struct nfs4_delegation *dp;
	struct nfs4_ol_stateid *stp;
	struct nfs4_client *cl = cstate->session->se_client;
	__be32 ret = nfserr_bad_stateid;

	spin_lock(&cl->cl_lock);
	s = find_stateid_locked(cl, stateid);
	if (!s)
		goto out_unlock;
	switch (s->sc_type) {
	case NFS4_DELEG_STID:
		ret = nfserr_locks_held;
		break;
	case NFS4_OPEN_STID:
		ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
		if (ret)
			break;
		ret = nfserr_locks_held;
		break;
	case NFS4_LOCK_STID:
		ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
		if (ret)
			break;
		stp = openlockstateid(s);
		ret = nfserr_locks_held;
		if (check_for_locks(stp->st_stid.sc_file,
				    lockowner(stp->st_stateowner)))
			break;
		unhash_lock_stateid(stp);
		spin_unlock(&cl->cl_lock);
		nfs4_put_stid(s);
		ret = nfs_ok;
		goto out;
	case NFS4_REVOKED_DELEG_STID:
		dp = delegstateid(s);
		list_del_init(&dp->dl_recall_lru);
		spin_unlock(&cl->cl_lock);
		nfs4_put_stid(s);
		ret = nfs_ok;
		goto out;
	/* Default falls through and returns nfserr_bad_stateid */
	}
out_unlock:
	spin_unlock(&cl->cl_lock);
out:
	return ret;
}
static inline int
setlkflg(int type)
{
	return (type == NFS4_READW_LT || type == NFS4_READ_LT) ?
		RD_STATE : WR_STATE;
}
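/*
 * Common checks for seqid-mutating operations: verify the owner seqid
 * (NFSv4.0), reject closed and revoked stateids, and take st_rwsem, which
 * is left held on success and dropped again on failure.
 */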
static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp)
{
	struct svc_fh *current_fh = &cstate->current_fh;
	struct nfs4_stateowner *sop = stp->st_stateowner;
	__be32 status;

	status = nfsd4_check_seqid(cstate, sop, seqid);
	if (status)
		return status;
	if (stp->st_stid.sc_type == NFS4_CLOSED_STID
		|| stp->st_stid.sc_type == NFS4_REVOKED_DELEG_STID)
		/*
		 * "Closed" stateid's exist *only* to return
		 * nfserr_replay_me from the previous step, and
		 * revoked delegations are kept only for free_stateid.
		 */
		return nfserr_bad_stateid;
	down_write(&stp->st_rwsem);
	status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
	if (status == nfs_ok)
		status = nfs4_check_fh(current_fh, &stp->st_stid);
	if (status != nfs_ok)
		up_write(&stp->st_rwsem);
	return status;
}
/*
 * Checks for sequence id mutating operations.
 */
static __be32
nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
			 stateid_t *stateid, char typemask,
			 struct nfs4_ol_stateid **stpp,
			 struct nfsd_net *nn)
{
	__be32 status;
	struct nfs4_stid *s;
	struct nfs4_ol_stateid *stp = NULL;

	dprintk("NFSD: %s: seqid=%d stateid = " STATEID_FMT "\n", __func__,
		seqid, STATEID_VAL(stateid));

	*stpp = NULL;
	status = nfsd4_lookup_stateid(cstate, stateid, typemask, &s, nn);
	if (status)
		return status;
	stp = openlockstateid(s);
	nfsd4_cstate_assign_replay(cstate, stp->st_stateowner);

	status = nfs4_seqid_op_checks(cstate, stateid, seqid, stp);
	if (!status)
		*stpp = stp;
	else
		nfs4_put_stid(&stp->st_stid);
	return status;
}
static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
			stateid_t *stateid, struct nfs4_ol_stateid **stpp, struct nfsd_net *nn)
{
	__be32 status;
	struct nfs4_openowner *oo;
	struct nfs4_ol_stateid *stp;

	status = nfs4_preprocess_seqid_op(cstate, seqid, stateid,
						NFS4_OPEN_STID, &stp, nn);
	if (status)
		return status;
	oo = openowner(stp->st_stateowner);
	if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
		up_write(&stp->st_rwsem);
		nfs4_put_stid(&stp->st_stid);
		return nfserr_bad_stateid;
	}
	*stpp = stp;
	return nfs_ok;
}
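/*
 * OPEN_CONFIRM (NFSv4.0 only): confirm the open owner so that its other
 * stateids become usable, and bump the open stateid returned to the client.
 */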
__be32
nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		   struct nfsd4_open_confirm *oc)
{
	__be32 status;
	struct nfs4_openowner *oo;
	struct nfs4_ol_stateid *stp;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	dprintk("NFSD: nfsd4_open_confirm on file %pd\n",
			cstate->current_fh.fh_dentry);

	status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
	if (status)
		return status;

	status = nfs4_preprocess_seqid_op(cstate,
					oc->oc_seqid, &oc->oc_req_stateid,
					NFS4_OPEN_STID, &stp, nn);
	if (status)
		goto out;
	oo = openowner(stp->st_stateowner);
	status = nfserr_bad_stateid;
	if (oo->oo_flags & NFS4_OO_CONFIRMED) {
		up_write(&stp->st_rwsem);
		goto put_stateid;
	}
	oo->oo_flags |= NFS4_OO_CONFIRMED;
	update_stateid(&stp->st_stid.sc_stateid);
	memcpy(&oc->oc_resp_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
	up_write(&stp->st_rwsem);
	dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n",
		__func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid));

	nfsd4_client_record_create(oo->oo_owner.so_client);
	status = nfs_ok;
put_stateid:
	nfs4_put_stid(&stp->st_stid);
out:
	nfsd4_bump_seqid(cstate, status);
	return status;
}
static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access)
{
	if (!test_access(access, stp))
		return;
	nfs4_file_put_access(stp->st_stid.sc_file, access);
	clear_access(access, stp);
}

static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access)
{
	switch (to_access) {
	case NFS4_SHARE_ACCESS_READ:
		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_WRITE);
		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
		break;
	case NFS4_SHARE_ACCESS_WRITE:
		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_READ);
		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
		break;
	case NFS4_SHARE_ACCESS_BOTH:
		break;
	default:
		WARN_ON_ONCE(1);
	}
}
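/*
 * OPEN_DOWNGRADE: the requested access and deny bits must be a subset of
 * what the stateid currently holds; drop whatever is no longer wanted.
 */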
__be32
nfsd4_open_downgrade(struct svc_rqst *rqstp,
		     struct nfsd4_compound_state *cstate,
		     struct nfsd4_open_downgrade *od)
{
	__be32 status;
	struct nfs4_ol_stateid *stp;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	dprintk("NFSD: nfsd4_open_downgrade on file %pd\n",
			cstate->current_fh.fh_dentry);

	/* We don't yet support WANT bits: */
	if (od->od_deleg_want)
		dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__,
			od->od_deleg_want);

	status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid,
					&od->od_stateid, &stp, nn);
	if (status)
		goto out;
	status = nfserr_inval;
	if (!test_access(od->od_share_access, stp)) {
		dprintk("NFSD: access not a subset of current bitmap: 0x%hhx, input access=%08x\n",
			stp->st_access_bmap, od->od_share_access);
		goto put_stateid;
	}
	if (!test_deny(od->od_share_deny, stp)) {
		dprintk("NFSD: deny not a subset of current bitmap: 0x%hhx, input deny=%08x\n",
			stp->st_deny_bmap, od->od_share_deny);
		goto put_stateid;
	}
	nfs4_stateid_downgrade(stp, od->od_share_access);

	reset_union_bmap_deny(od->od_share_deny, stp);

	update_stateid(&stp->st_stid.sc_stateid);
	memcpy(&od->od_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
	status = nfs_ok;
put_stateid:
	up_write(&stp->st_rwsem);
	nfs4_put_stid(&stp->st_stid);
out:
	nfsd4_bump_seqid(cstate, status);
	return status;
}
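/*
 * After CLOSE, NFSv4.1+ clients can drop the stateid immediately, while a
 * 4.0 client's last closed stateid is parked on the close_lru so replays
 * can still be answered.
 */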
static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
{
	struct nfs4_client *clp = s->st_stid.sc_client;
	LIST_HEAD(reaplist);

	s->st_stid.sc_type = NFS4_CLOSED_STID;
	spin_lock(&clp->cl_lock);
	unhash_open_stateid(s, &reaplist);

	if (clp->cl_minorversion) {
		put_ol_stateid_locked(s, &reaplist);
		spin_unlock(&clp->cl_lock);
		free_ol_stateid_reaplist(&reaplist);
	} else {
		spin_unlock(&clp->cl_lock);
		free_ol_stateid_reaplist(&reaplist);
		move_to_close_lru(s, clp->net);
	}
}
/*
 * nfs4_unlock_state() called after encode
 */
__be32
nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	    struct nfsd4_close *close)
{
	__be32 status;
	struct nfs4_ol_stateid *stp;
	struct net *net = SVC_NET(rqstp);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	dprintk("NFSD: nfsd4_close on file %pd\n",
			cstate->current_fh.fh_dentry);

	status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid,
					&close->cl_stateid,
					NFS4_OPEN_STID|NFS4_CLOSED_STID,
					&stp, nn);
	nfsd4_bump_seqid(cstate, status);
	if (status)
		goto out;
	update_stateid(&stp->st_stid.sc_stateid);
	memcpy(&close->cl_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
	up_write(&stp->st_rwsem);

	nfsd4_close_open_stateid(stp);

	/* See RFC5661 section 18.2.4 */
	if (stp->st_stid.sc_client->cl_minorversion)
		memcpy(&close->cl_stateid, &close_stateid,
				sizeof(close->cl_stateid));

	/* put reference from nfs4_preprocess_seqid_op */
	nfs4_put_stid(&stp->st_stid);
out:
	return status;
}
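/*
 * DELEGRETURN: verify the delegation stateid and tear the delegation down.
 */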
__be32
nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		  struct nfsd4_delegreturn *dr)
{
	struct nfs4_delegation *dp;
	stateid_t *stateid = &dr->dr_stateid;
	struct nfs4_stid *s;
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
		return status;

	status = nfsd4_lookup_stateid(cstate, stateid, NFS4_DELEG_STID, &s, nn);
	if (status)
		goto out;
	dp = delegstateid(s);
	status = check_stateid_generation(stateid, &dp->dl_stid.sc_stateid, nfsd4_has_session(cstate));
	if (status)
		goto put_stateid;

	destroy_delegation(dp);
put_stateid:
	nfs4_put_stid(&dp->dl_stid);
out:
	return status;
}
4950 #define LOFF_OVERFLOW(start, len) ((u64)(len) > ~(u64)(start))
4953 end_offset(u64 start
, u64 len
)
4958 return end
>= start
? end
: NFS4_MAX_UINT64
;
4961 /* last octet in a range */
4963 last_byte_offset(u64 start
, u64 len
)
4969 return end
> start
? end
- 1: NFS4_MAX_UINT64
;
4973 * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that
4974 * we can't properly handle lock requests that go beyond the (2^63 - 1)-th
4975 * byte, because of sign extension problems. Since NFSv4 calls for 64-bit
4976 * locking, this prevents us from being completely protocol-compliant. The
4977 * real solution to this problem is to start using unsigned file offsets in
4978 * the VFS, but this is a very deep change!
4981 nfs4_transform_lock_offset(struct file_lock
*lock
)
4983 if (lock
->fl_start
< 0)
4984 lock
->fl_start
= OFFSET_MAX
;
4985 if (lock
->fl_end
< 0)
4986 lock
->fl_end
= OFFSET_MAX
;
4989 static void nfsd4_fl_get_owner(struct file_lock
*dst
, struct file_lock
*src
)
4991 struct nfs4_lockowner
*lo
= (struct nfs4_lockowner
*)src
->fl_owner
;
4992 dst
->fl_owner
= (fl_owner_t
)lockowner(nfs4_get_stateowner(&lo
->lo_owner
));
4995 static void nfsd4_fl_put_owner(struct file_lock
*fl
)
4997 struct nfs4_lockowner
*lo
= (struct nfs4_lockowner
*)fl
->fl_owner
;
5000 nfs4_put_stateowner(&lo
->lo_owner
);
5001 fl
->fl_owner
= NULL
;
5005 static const struct lock_manager_operations nfsd_posix_mng_ops
= {
5006 .lm_get_owner
= nfsd4_fl_get_owner
,
5007 .lm_put_owner
= nfsd4_fl_put_owner
,
5011 nfs4_set_lock_denied(struct file_lock
*fl
, struct nfsd4_lock_denied
*deny
)
5013 struct nfs4_lockowner
*lo
;
5015 if (fl
->fl_lmops
== &nfsd_posix_mng_ops
) {
5016 lo
= (struct nfs4_lockowner
*) fl
->fl_owner
;
5017 deny
->ld_owner
.data
= kmemdup(lo
->lo_owner
.so_owner
.data
,
5018 lo
->lo_owner
.so_owner
.len
, GFP_KERNEL
);
5019 if (!deny
->ld_owner
.data
)
5020 /* We just don't care that much */
5022 deny
->ld_owner
.len
= lo
->lo_owner
.so_owner
.len
;
5023 deny
->ld_clientid
= lo
->lo_owner
.so_client
->cl_clientid
;
5026 deny
->ld_owner
.len
= 0;
5027 deny
->ld_owner
.data
= NULL
;
5028 deny
->ld_clientid
.cl_boot
= 0;
5029 deny
->ld_clientid
.cl_id
= 0;
5031 deny
->ld_start
= fl
->fl_start
;
5032 deny
->ld_length
= NFS4_MAX_UINT64
;
5033 if (fl
->fl_end
!= NFS4_MAX_UINT64
)
5034 deny
->ld_length
= fl
->fl_end
- fl
->fl_start
+ 1;
5035 deny
->ld_type
= NFS4_READ_LT
;
5036 if (fl
->fl_type
!= F_RDLCK
)
5037 deny
->ld_type
= NFS4_WRITE_LT
;
5040 static struct nfs4_lockowner
*
5041 find_lockowner_str_locked(clientid_t
*clid
, struct xdr_netobj
*owner
,
5042 struct nfs4_client
*clp
)
5044 unsigned int strhashval
= ownerstr_hashval(owner
);
5045 struct nfs4_stateowner
*so
;
5047 lockdep_assert_held(&clp
->cl_lock
);
5049 list_for_each_entry(so
, &clp
->cl_ownerstr_hashtbl
[strhashval
],
5051 if (so
->so_is_open_owner
)
5053 if (same_owner_str(so
, owner
))
5054 return lockowner(nfs4_get_stateowner(so
));
5059 static struct nfs4_lockowner
*
5060 find_lockowner_str(clientid_t
*clid
, struct xdr_netobj
*owner
,
5061 struct nfs4_client
*clp
)
5063 struct nfs4_lockowner
*lo
;
5065 spin_lock(&clp
->cl_lock
);
5066 lo
= find_lockowner_str_locked(clid
, owner
, clp
);
5067 spin_unlock(&clp
->cl_lock
);
5071 static void nfs4_unhash_lockowner(struct nfs4_stateowner
*sop
)
5073 unhash_lockowner_locked(lockowner(sop
));
5076 static void nfs4_free_lockowner(struct nfs4_stateowner
*sop
)
5078 struct nfs4_lockowner
*lo
= lockowner(sop
);
5080 kmem_cache_free(lockowner_slab
, lo
);
5083 static const struct nfs4_stateowner_operations lockowner_ops
= {
5084 .so_unhash
= nfs4_unhash_lockowner
,
5085 .so_free
= nfs4_free_lockowner
,
5089 * Alloc a lock owner structure.
5090 * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) has
5093 * strhashval = ownerstr_hashval
5095 static struct nfs4_lockowner
*
5096 alloc_init_lock_stateowner(unsigned int strhashval
, struct nfs4_client
*clp
,
5097 struct nfs4_ol_stateid
*open_stp
,
5098 struct nfsd4_lock
*lock
)
5100 struct nfs4_lockowner
*lo
, *ret
;
5102 lo
= alloc_stateowner(lockowner_slab
, &lock
->lk_new_owner
, clp
);
5105 INIT_LIST_HEAD(&lo
->lo_owner
.so_stateids
);
5106 lo
->lo_owner
.so_is_open_owner
= 0;
5107 lo
->lo_owner
.so_seqid
= lock
->lk_new_lock_seqid
;
5108 lo
->lo_owner
.so_ops
= &lockowner_ops
;
5109 spin_lock(&clp
->cl_lock
);
5110 ret
= find_lockowner_str_locked(&clp
->cl_clientid
,
5111 &lock
->lk_new_owner
, clp
);
5113 list_add(&lo
->lo_owner
.so_strhash
,
5114 &clp
->cl_ownerstr_hashtbl
[strhashval
]);
5117 nfs4_free_lockowner(&lo
->lo_owner
);
5118 spin_unlock(&clp
->cl_lock
);
5123 init_lock_stateid(struct nfs4_ol_stateid
*stp
, struct nfs4_lockowner
*lo
,
5124 struct nfs4_file
*fp
, struct inode
*inode
,
5125 struct nfs4_ol_stateid
*open_stp
)
5127 struct nfs4_client
*clp
= lo
->lo_owner
.so_client
;
5129 lockdep_assert_held(&clp
->cl_lock
);
5131 atomic_inc(&stp
->st_stid
.sc_count
);
5132 stp
->st_stid
.sc_type
= NFS4_LOCK_STID
;
5133 stp
->st_stateowner
= nfs4_get_stateowner(&lo
->lo_owner
);
5135 stp
->st_stid
.sc_file
= fp
;
5136 stp
->st_stid
.sc_free
= nfs4_free_lock_stateid
;
5137 stp
->st_access_bmap
= 0;
5138 stp
->st_deny_bmap
= open_stp
->st_deny_bmap
;
5139 stp
->st_openstp
= open_stp
;
5140 init_rwsem(&stp
->st_rwsem
);
5141 list_add(&stp
->st_locks
, &open_stp
->st_locks
);
5142 list_add(&stp
->st_perstateowner
, &lo
->lo_owner
.so_stateids
);
5143 spin_lock(&fp
->fi_lock
);
5144 list_add(&stp
->st_perfile
, &fp
->fi_stateids
);
5145 spin_unlock(&fp
->fi_lock
);
5148 static struct nfs4_ol_stateid
*
5149 find_lock_stateid(struct nfs4_lockowner
*lo
, struct nfs4_file
*fp
)
5151 struct nfs4_ol_stateid
*lst
;
5152 struct nfs4_client
*clp
= lo
->lo_owner
.so_client
;
5154 lockdep_assert_held(&clp
->cl_lock
);
5156 list_for_each_entry(lst
, &lo
->lo_owner
.so_stateids
, st_perstateowner
) {
5157 if (lst
->st_stid
.sc_file
== fp
) {
5158 atomic_inc(&lst
->st_stid
.sc_count
);
5165 static struct nfs4_ol_stateid
*
5166 find_or_create_lock_stateid(struct nfs4_lockowner
*lo
, struct nfs4_file
*fi
,
5167 struct inode
*inode
, struct nfs4_ol_stateid
*ost
,
5170 struct nfs4_stid
*ns
= NULL
;
5171 struct nfs4_ol_stateid
*lst
;
5172 struct nfs4_openowner
*oo
= openowner(ost
->st_stateowner
);
5173 struct nfs4_client
*clp
= oo
->oo_owner
.so_client
;
5175 spin_lock(&clp
->cl_lock
);
5176 lst
= find_lock_stateid(lo
, fi
);
5178 spin_unlock(&clp
->cl_lock
);
5179 ns
= nfs4_alloc_stid(clp
, stateid_slab
);
5183 spin_lock(&clp
->cl_lock
);
5184 lst
= find_lock_stateid(lo
, fi
);
5186 lst
= openlockstateid(ns
);
5187 init_lock_stateid(lst
, lo
, fi
, inode
, ost
);
5192 spin_unlock(&clp
->cl_lock
);
5199 check_lock_length(u64 offset
, u64 length
)
5201 return ((length
== 0) || ((length
!= NFS4_MAX_UINT64
) &&
5202 LOFF_OVERFLOW(offset
, length
)));
5205 static void get_lock_access(struct nfs4_ol_stateid
*lock_stp
, u32 access
)
5207 struct nfs4_file
*fp
= lock_stp
->st_stid
.sc_file
;
5209 lockdep_assert_held(&fp
->fi_lock
);
5211 if (test_access(access
, lock_stp
))
5213 __nfs4_file_get_access(fp
, access
);
5214 set_access(access
, lock_stp
);
5218 lookup_or_create_lock_state(struct nfsd4_compound_state
*cstate
,
5219 struct nfs4_ol_stateid
*ost
,
5220 struct nfsd4_lock
*lock
,
5221 struct nfs4_ol_stateid
**lst
, bool *new)
5224 struct nfs4_file
*fi
= ost
->st_stid
.sc_file
;
5225 struct nfs4_openowner
*oo
= openowner(ost
->st_stateowner
);
5226 struct nfs4_client
*cl
= oo
->oo_owner
.so_client
;
5227 struct inode
*inode
= cstate
->current_fh
.fh_dentry
->d_inode
;
5228 struct nfs4_lockowner
*lo
;
5229 unsigned int strhashval
;
5231 lo
= find_lockowner_str(&cl
->cl_clientid
, &lock
->v
.new.owner
, cl
);
5233 strhashval
= ownerstr_hashval(&lock
->v
.new.owner
);
5234 lo
= alloc_init_lock_stateowner(strhashval
, cl
, ost
, lock
);
5236 return nfserr_jukebox
;
5238 /* with an existing lockowner, seqids must be the same */
5239 status
= nfserr_bad_seqid
;
5240 if (!cstate
->minorversion
&&
5241 lock
->lk_new_lock_seqid
!= lo
->lo_owner
.so_seqid
)
5245 *lst
= find_or_create_lock_stateid(lo
, fi
, inode
, ost
, new);
5247 status
= nfserr_jukebox
;
5252 nfs4_put_stateowner(&lo
->lo_owner
);
5260 nfsd4_lock(struct svc_rqst
*rqstp
, struct nfsd4_compound_state
*cstate
,
5261 struct nfsd4_lock
*lock
)
5263 struct nfs4_openowner
*open_sop
= NULL
;
5264 struct nfs4_lockowner
*lock_sop
= NULL
;
5265 struct nfs4_ol_stateid
*lock_stp
= NULL
;
5266 struct nfs4_ol_stateid
*open_stp
= NULL
;
5267 struct nfs4_file
*fp
;
5268 struct file
*filp
= NULL
;
5269 struct file_lock
*file_lock
= NULL
;
5270 struct file_lock
*conflock
= NULL
;
5275 struct net
*net
= SVC_NET(rqstp
);
5276 struct nfsd_net
*nn
= net_generic(net
, nfsd_net_id
);
5278 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
5279 (long long) lock
->lk_offset
,
5280 (long long) lock
->lk_length
);
5282 if (check_lock_length(lock
->lk_offset
, lock
->lk_length
))
5283 return nfserr_inval
;
5285 if ((status
= fh_verify(rqstp
, &cstate
->current_fh
,
5286 S_IFREG
, NFSD_MAY_LOCK
))) {
5287 dprintk("NFSD: nfsd4_lock: permission denied!\n");
5291 if (lock
->lk_is_new
) {
5292 if (nfsd4_has_session(cstate
))
5293 /* See rfc 5661 18.10.3: given clientid is ignored: */
5294 memcpy(&lock
->v
.new.clientid
,
5295 &cstate
->session
->se_client
->cl_clientid
,
5296 sizeof(clientid_t
));
5298 status
= nfserr_stale_clientid
;
5299 if (STALE_CLIENTID(&lock
->lk_new_clientid
, nn
))
5302 /* validate and update open stateid and open seqid */
5303 status
= nfs4_preprocess_confirmed_seqid_op(cstate
,
5304 lock
->lk_new_open_seqid
,
5305 &lock
->lk_new_open_stateid
,
5309 up_write(&open_stp
->st_rwsem
);
5310 open_sop
= openowner(open_stp
->st_stateowner
);
5311 status
= nfserr_bad_stateid
;
5312 if (!same_clid(&open_sop
->oo_owner
.so_client
->cl_clientid
,
5313 &lock
->v
.new.clientid
))
5315 status
= lookup_or_create_lock_state(cstate
, open_stp
, lock
,
5317 if (status
== nfs_ok
)
5318 down_write(&lock_stp
->st_rwsem
);
5320 status
= nfs4_preprocess_seqid_op(cstate
,
5321 lock
->lk_old_lock_seqid
,
5322 &lock
->lk_old_lock_stateid
,
5323 NFS4_LOCK_STID
, &lock_stp
, nn
);
5327 lock_sop
= lockowner(lock_stp
->st_stateowner
);
5329 lkflg
= setlkflg(lock
->lk_type
);
5330 status
= nfs4_check_openmode(lock_stp
, lkflg
);
5334 status
= nfserr_grace
;
5335 if (locks_in_grace(net
) && !lock
->lk_reclaim
)
5337 status
= nfserr_no_grace
;
5338 if (!locks_in_grace(net
) && lock
->lk_reclaim
)
5341 file_lock
= locks_alloc_lock();
5343 dprintk("NFSD: %s: unable to allocate lock!\n", __func__
);
5344 status
= nfserr_jukebox
;
5348 fp
= lock_stp
->st_stid
.sc_file
;
5349 switch (lock
->lk_type
) {
5352 spin_lock(&fp
->fi_lock
);
5353 filp
= find_readable_file_locked(fp
);
5355 get_lock_access(lock_stp
, NFS4_SHARE_ACCESS_READ
);
5356 spin_unlock(&fp
->fi_lock
);
5357 file_lock
->fl_type
= F_RDLCK
;
5360 case NFS4_WRITEW_LT
:
5361 spin_lock(&fp
->fi_lock
);
5362 filp
= find_writeable_file_locked(fp
);
5364 get_lock_access(lock_stp
, NFS4_SHARE_ACCESS_WRITE
);
5365 spin_unlock(&fp
->fi_lock
);
5366 file_lock
->fl_type
= F_WRLCK
;
5369 status
= nfserr_inval
;
5373 status
= nfserr_openmode
;
5377 file_lock
->fl_owner
= (fl_owner_t
)lockowner(nfs4_get_stateowner(&lock_sop
->lo_owner
));
5378 file_lock
->fl_pid
= current
->tgid
;
5379 file_lock
->fl_file
= filp
;
5380 file_lock
->fl_flags
= FL_POSIX
;
5381 file_lock
->fl_lmops
= &nfsd_posix_mng_ops
;
5382 file_lock
->fl_start
= lock
->lk_offset
;
5383 file_lock
->fl_end
= last_byte_offset(lock
->lk_offset
, lock
->lk_length
);
5384 nfs4_transform_lock_offset(file_lock
);
5386 conflock
= locks_alloc_lock();
5388 dprintk("NFSD: %s: unable to allocate lock!\n", __func__
);
5389 status
= nfserr_jukebox
;
5393 err
= vfs_lock_file(filp
, F_SETLK
, file_lock
, conflock
);
5395 case 0: /* success! */
5396 update_stateid(&lock_stp
->st_stid
.sc_stateid
);
5397 memcpy(&lock
->lk_resp_stateid
, &lock_stp
->st_stid
.sc_stateid
,
5401 case (EAGAIN
): /* conflock holds conflicting lock */
5402 status
= nfserr_denied
;
5403 dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
5404 nfs4_set_lock_denied(conflock
, &lock
->lk_denied
);
5407 status
= nfserr_deadlock
;
5410 dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n",err
);
5411 status
= nfserrno(err
);
5418 /* Bump seqid manually if the 4.0 replay owner is openowner */
5419 if (cstate
->replay_owner
&&
5420 cstate
->replay_owner
!= &lock_sop
->lo_owner
&&
5421 seqid_mutating_err(ntohl(status
)))
5422 lock_sop
->lo_owner
.so_seqid
++;
5424 up_write(&lock_stp
->st_rwsem
);
5427 * If this is a new, never-before-used stateid, and we are
5428 * returning an error, then just go ahead and release it.
5431 release_lock_stateid(lock_stp
);
5433 nfs4_put_stid(&lock_stp
->st_stid
);
5436 nfs4_put_stid(&open_stp
->st_stid
);
5437 nfsd4_bump_seqid(cstate
, status
);
5439 locks_free_lock(file_lock
);
5441 locks_free_lock(conflock
);
5446 * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
5447 * so we do a temporary open here just to get an open file to pass to
5448 * vfs_test_lock. (Arguably perhaps test_lock should be done with an
5451 static __be32
nfsd_test_lock(struct svc_rqst
*rqstp
, struct svc_fh
*fhp
, struct file_lock
*lock
)
5454 __be32 err
= nfsd_open(rqstp
, fhp
, S_IFREG
, NFSD_MAY_READ
, &file
);
5456 err
= nfserrno(vfs_test_lock(file
, lock
));
5466 nfsd4_lockt(struct svc_rqst
*rqstp
, struct nfsd4_compound_state
*cstate
,
5467 struct nfsd4_lockt
*lockt
)
5469 struct file_lock
*file_lock
= NULL
;
5470 struct nfs4_lockowner
*lo
= NULL
;
5472 struct nfsd_net
*nn
= net_generic(SVC_NET(rqstp
), nfsd_net_id
);
5474 if (locks_in_grace(SVC_NET(rqstp
)))
5475 return nfserr_grace
;
5477 if (check_lock_length(lockt
->lt_offset
, lockt
->lt_length
))
5478 return nfserr_inval
;
5480 if (!nfsd4_has_session(cstate
)) {
5481 status
= lookup_clientid(&lockt
->lt_clientid
, cstate
, nn
);
5486 if ((status
= fh_verify(rqstp
, &cstate
->current_fh
, S_IFREG
, 0)))
5489 file_lock
= locks_alloc_lock();
5491 dprintk("NFSD: %s: unable to allocate lock!\n", __func__
);
5492 status
= nfserr_jukebox
;
5496 switch (lockt
->lt_type
) {
5499 file_lock
->fl_type
= F_RDLCK
;
5502 case NFS4_WRITEW_LT
:
5503 file_lock
->fl_type
= F_WRLCK
;
5506 dprintk("NFSD: nfs4_lockt: bad lock type!\n");
5507 status
= nfserr_inval
;
5511 lo
= find_lockowner_str(&lockt
->lt_clientid
, &lockt
->lt_owner
,
5514 file_lock
->fl_owner
= (fl_owner_t
)lo
;
5515 file_lock
->fl_pid
= current
->tgid
;
5516 file_lock
->fl_flags
= FL_POSIX
;
5518 file_lock
->fl_start
= lockt
->lt_offset
;
5519 file_lock
->fl_end
= last_byte_offset(lockt
->lt_offset
, lockt
->lt_length
);
5521 nfs4_transform_lock_offset(file_lock
);
5523 status
= nfsd_test_lock(rqstp
, &cstate
->current_fh
, file_lock
);
5527 if (file_lock
->fl_type
!= F_UNLCK
) {
5528 status
= nfserr_denied
;
5529 nfs4_set_lock_denied(file_lock
, &lockt
->lt_denied
);
5533 nfs4_put_stateowner(&lo
->lo_owner
);
5535 locks_free_lock(file_lock
);
5540 nfsd4_locku(struct svc_rqst
*rqstp
, struct nfsd4_compound_state
*cstate
,
5541 struct nfsd4_locku
*locku
)
5543 struct nfs4_ol_stateid
*stp
;
5544 struct file
*filp
= NULL
;
5545 struct file_lock
*file_lock
= NULL
;
5548 struct nfsd_net
*nn
= net_generic(SVC_NET(rqstp
), nfsd_net_id
);
5550 dprintk("NFSD: nfsd4_locku: start=%Ld length=%Ld\n",
5551 (long long) locku
->lu_offset
,
5552 (long long) locku
->lu_length
);
5554 if (check_lock_length(locku
->lu_offset
, locku
->lu_length
))
5555 return nfserr_inval
;
5557 status
= nfs4_preprocess_seqid_op(cstate
, locku
->lu_seqid
,
5558 &locku
->lu_stateid
, NFS4_LOCK_STID
,
5562 filp
= find_any_file(stp
->st_stid
.sc_file
);
5564 status
= nfserr_lock_range
;
5567 file_lock
= locks_alloc_lock();
5569 dprintk("NFSD: %s: unable to allocate lock!\n", __func__
);
5570 status
= nfserr_jukebox
;
5574 file_lock
->fl_type
= F_UNLCK
;
5575 file_lock
->fl_owner
= (fl_owner_t
)lockowner(nfs4_get_stateowner(stp
->st_stateowner
));
5576 file_lock
->fl_pid
= current
->tgid
;
5577 file_lock
->fl_file
= filp
;
5578 file_lock
->fl_flags
= FL_POSIX
;
5579 file_lock
->fl_lmops
= &nfsd_posix_mng_ops
;
5580 file_lock
->fl_start
= locku
->lu_offset
;
5582 file_lock
->fl_end
= last_byte_offset(locku
->lu_offset
,
5584 nfs4_transform_lock_offset(file_lock
);
5586 err
= vfs_lock_file(filp
, F_SETLK
, file_lock
, NULL
);
5588 dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n");
5591 update_stateid(&stp
->st_stid
.sc_stateid
);
5592 memcpy(&locku
->lu_stateid
, &stp
->st_stid
.sc_stateid
, sizeof(stateid_t
));
5596 up_write(&stp
->st_rwsem
);
5597 nfs4_put_stid(&stp
->st_stid
);
5599 nfsd4_bump_seqid(cstate
, status
);
5601 locks_free_lock(file_lock
);
5605 status
= nfserrno(err
);
5611 * true: locks held by lockowner
5612 * false: no locks held by lockowner
5615 check_for_locks(struct nfs4_file
*fp
, struct nfs4_lockowner
*lowner
)
5617 struct file_lock
**flpp
;
5619 struct file
*filp
= find_any_file(fp
);
5620 struct inode
*inode
;
5623 /* Any valid lock stateid should have some sort of access */
5628 inode
= file_inode(filp
);
5630 spin_lock(&inode
->i_lock
);
5631 for (flpp
= &inode
->i_flock
; *flpp
!= NULL
; flpp
= &(*flpp
)->fl_next
) {
5632 if ((*flpp
)->fl_owner
== (fl_owner_t
)lowner
) {
5637 spin_unlock(&inode
->i_lock
);
5643 nfsd4_release_lockowner(struct svc_rqst
*rqstp
,
5644 struct nfsd4_compound_state
*cstate
,
5645 struct nfsd4_release_lockowner
*rlockowner
)
5647 clientid_t
*clid
= &rlockowner
->rl_clientid
;
5648 struct nfs4_stateowner
*sop
;
5649 struct nfs4_lockowner
*lo
= NULL
;
5650 struct nfs4_ol_stateid
*stp
;
5651 struct xdr_netobj
*owner
= &rlockowner
->rl_owner
;
5652 unsigned int hashval
= ownerstr_hashval(owner
);
5654 struct nfsd_net
*nn
= net_generic(SVC_NET(rqstp
), nfsd_net_id
);
5655 struct nfs4_client
*clp
;
5657 dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
5658 clid
->cl_boot
, clid
->cl_id
);
5660 status
= lookup_clientid(clid
, cstate
, nn
);
5665 /* Find the matching lock stateowner */
5666 spin_lock(&clp
->cl_lock
);
5667 list_for_each_entry(sop
, &clp
->cl_ownerstr_hashtbl
[hashval
],
5670 if (sop
->so_is_open_owner
|| !same_owner_str(sop
, owner
))
5673 /* see if there are still any locks associated with it */
5674 lo
= lockowner(sop
);
5675 list_for_each_entry(stp
, &sop
->so_stateids
, st_perstateowner
) {
5676 if (check_for_locks(stp
->st_stid
.sc_file
, lo
)) {
5677 status
= nfserr_locks_held
;
5678 spin_unlock(&clp
->cl_lock
);
5683 nfs4_get_stateowner(sop
);
5686 spin_unlock(&clp
->cl_lock
);
5688 release_lockowner(lo
);
5692 static inline struct nfs4_client_reclaim
*
5695 return kmalloc(sizeof(struct nfs4_client_reclaim
), GFP_KERNEL
);
5699 nfs4_has_reclaimed_state(const char *name
, struct nfsd_net
*nn
)
5701 struct nfs4_client_reclaim
*crp
;
5703 crp
= nfsd4_find_reclaim_client(name
, nn
);
5704 return (crp
&& crp
->cr_clp
);
5708 * failure => all reset bets are off, nfserr_no_grace...
5710 struct nfs4_client_reclaim
*
5711 nfs4_client_to_reclaim(const char *name
, struct nfsd_net
*nn
)
5713 unsigned int strhashval
;
5714 struct nfs4_client_reclaim
*crp
;
5716 dprintk("NFSD nfs4_client_to_reclaim NAME: %.*s\n", HEXDIR_LEN
, name
);
5717 crp
= alloc_reclaim();
5719 strhashval
= clientstr_hashval(name
);
5720 INIT_LIST_HEAD(&crp
->cr_strhash
);
5721 list_add(&crp
->cr_strhash
, &nn
->reclaim_str_hashtbl
[strhashval
]);
5722 memcpy(crp
->cr_recdir
, name
, HEXDIR_LEN
);
5724 nn
->reclaim_str_hashtbl_size
++;
5730 nfs4_remove_reclaim_record(struct nfs4_client_reclaim
*crp
, struct nfsd_net
*nn
)
5732 list_del(&crp
->cr_strhash
);
5734 nn
->reclaim_str_hashtbl_size
--;
5738 nfs4_release_reclaim(struct nfsd_net
*nn
)
5740 struct nfs4_client_reclaim
*crp
= NULL
;
5743 for (i
= 0; i
< CLIENT_HASH_SIZE
; i
++) {
5744 while (!list_empty(&nn
->reclaim_str_hashtbl
[i
])) {
5745 crp
= list_entry(nn
->reclaim_str_hashtbl
[i
].next
,
5746 struct nfs4_client_reclaim
, cr_strhash
);
5747 nfs4_remove_reclaim_record(crp
, nn
);
5750 WARN_ON_ONCE(nn
->reclaim_str_hashtbl_size
);
5754 * called from OPEN, CLAIM_PREVIOUS with a new clientid. */
5755 struct nfs4_client_reclaim
*
5756 nfsd4_find_reclaim_client(const char *recdir
, struct nfsd_net
*nn
)
5758 unsigned int strhashval
;
5759 struct nfs4_client_reclaim
*crp
= NULL
;
5761 dprintk("NFSD: nfs4_find_reclaim_client for recdir %s\n", recdir
);
5763 strhashval
= clientstr_hashval(recdir
);
5764 list_for_each_entry(crp
, &nn
->reclaim_str_hashtbl
[strhashval
], cr_strhash
) {
5765 if (same_name(crp
->cr_recdir
, recdir
)) {
5773 * Called from OPEN. Look for clientid in reclaim list.
5776 nfs4_check_open_reclaim(clientid_t
*clid
,
5777 struct nfsd4_compound_state
*cstate
,
5778 struct nfsd_net
*nn
)
5782 /* find clientid in conf_id_hashtbl */
5783 status
= lookup_clientid(clid
, cstate
, nn
);
5785 return nfserr_reclaim_bad
;
5787 if (test_bit(NFSD4_CLIENT_RECLAIM_COMPLETE
, &cstate
->clp
->cl_flags
))
5788 return nfserr_no_grace
;
5790 if (nfsd4_client_record_check(cstate
->clp
))
5791 return nfserr_reclaim_bad
;
5796 #ifdef CONFIG_NFSD_FAULT_INJECTION
5798 put_client(struct nfs4_client
*clp
)
5800 atomic_dec(&clp
->cl_refcount
);
5803 static struct nfs4_client
*
5804 nfsd_find_client(struct sockaddr_storage
*addr
, size_t addr_size
)
5806 struct nfs4_client
*clp
;
5807 struct nfsd_net
*nn
= net_generic(current
->nsproxy
->net_ns
,
5810 if (!nfsd_netns_ready(nn
))
5813 list_for_each_entry(clp
, &nn
->client_lru
, cl_lru
) {
5814 if (memcmp(&clp
->cl_addr
, addr
, addr_size
) == 0)
5821 nfsd_inject_print_clients(void)
5823 struct nfs4_client
*clp
;
5825 struct nfsd_net
*nn
= net_generic(current
->nsproxy
->net_ns
,
5827 char buf
[INET6_ADDRSTRLEN
];
5829 if (!nfsd_netns_ready(nn
))
5832 spin_lock(&nn
->client_lock
);
5833 list_for_each_entry(clp
, &nn
->client_lru
, cl_lru
) {
5834 rpc_ntop((struct sockaddr
*)&clp
->cl_addr
, buf
, sizeof(buf
));
5835 pr_info("NFS Client: %s\n", buf
);
5838 spin_unlock(&nn
->client_lock
);
5844 nfsd_inject_forget_client(struct sockaddr_storage
*addr
, size_t addr_size
)
5847 struct nfs4_client
*clp
;
5848 struct nfsd_net
*nn
= net_generic(current
->nsproxy
->net_ns
,
5851 if (!nfsd_netns_ready(nn
))
5854 spin_lock(&nn
->client_lock
);
5855 clp
= nfsd_find_client(addr
, addr_size
);
5857 if (mark_client_expired_locked(clp
) == nfs_ok
)
5862 spin_unlock(&nn
->client_lock
);
5871 nfsd_inject_forget_clients(u64 max
)
5874 struct nfs4_client
*clp
, *next
;
5875 struct nfsd_net
*nn
= net_generic(current
->nsproxy
->net_ns
,
5877 LIST_HEAD(reaplist
);
5879 if (!nfsd_netns_ready(nn
))
5882 spin_lock(&nn
->client_lock
);
5883 list_for_each_entry_safe(clp
, next
, &nn
->client_lru
, cl_lru
) {
5884 if (mark_client_expired_locked(clp
) == nfs_ok
) {
5885 list_add(&clp
->cl_lru
, &reaplist
);
5886 if (max
!= 0 && ++count
>= max
)
5890 spin_unlock(&nn
->client_lock
);
5892 list_for_each_entry_safe(clp
, next
, &reaplist
, cl_lru
)
5898 static void nfsd_print_count(struct nfs4_client
*clp
, unsigned int count
,
5901 char buf
[INET6_ADDRSTRLEN
];
5902 rpc_ntop((struct sockaddr
*)&clp
->cl_addr
, buf
, sizeof(buf
));
5903 printk(KERN_INFO
"NFS Client: %s has %u %s\n", buf
, count
, type
);
5907 nfsd_inject_add_lock_to_list(struct nfs4_ol_stateid
*lst
,
5908 struct list_head
*collect
)
5910 struct nfs4_client
*clp
= lst
->st_stid
.sc_client
;
5911 struct nfsd_net
*nn
= net_generic(current
->nsproxy
->net_ns
,
5917 lockdep_assert_held(&nn
->client_lock
);
5918 atomic_inc(&clp
->cl_refcount
);
5919 list_add(&lst
->st_locks
, collect
);
5922 static u64
nfsd_foreach_client_lock(struct nfs4_client
*clp
, u64 max
,
5923 struct list_head
*collect
,
5924 void (*func
)(struct nfs4_ol_stateid
*))
5926 struct nfs4_openowner
*oop
;
5927 struct nfs4_ol_stateid
*stp
, *st_next
;
5928 struct nfs4_ol_stateid
*lst
, *lst_next
;
5931 spin_lock(&clp
->cl_lock
);
5932 list_for_each_entry(oop
, &clp
->cl_openowners
, oo_perclient
) {
5933 list_for_each_entry_safe(stp
, st_next
,
5934 &oop
->oo_owner
.so_stateids
, st_perstateowner
) {
5935 list_for_each_entry_safe(lst
, lst_next
,
5936 &stp
->st_locks
, st_locks
) {
5939 nfsd_inject_add_lock_to_list(lst
,
5944 * Despite the fact that these functions deal
5945 * with 64-bit integers for "count", we must
5946 * ensure that it doesn't blow up the
5947 * clp->cl_refcount. Throw a warning if we
5948 * start to approach INT_MAX here.
5950 WARN_ON_ONCE(count
== (INT_MAX
/ 2));
5957 spin_unlock(&clp
->cl_lock
);
5963 nfsd_collect_client_locks(struct nfs4_client
*clp
, struct list_head
*collect
,
5966 return nfsd_foreach_client_lock(clp
, max
, collect
, unhash_lock_stateid
);
5970 nfsd_print_client_locks(struct nfs4_client
*clp
)
5972 u64 count
= nfsd_foreach_client_lock(clp
, 0, NULL
, NULL
);
5973 nfsd_print_count(clp
, count
, "locked files");
5978 nfsd_inject_print_locks(void)
5980 struct nfs4_client
*clp
;
5982 struct nfsd_net
*nn
= net_generic(current
->nsproxy
->net_ns
,
5985 if (!nfsd_netns_ready(nn
))
5988 spin_lock(&nn
->client_lock
);
5989 list_for_each_entry(clp
, &nn
->client_lru
, cl_lru
)
5990 count
+= nfsd_print_client_locks(clp
);
5991 spin_unlock(&nn
->client_lock
);
5997 nfsd_reap_locks(struct list_head
*reaplist
)
5999 struct nfs4_client
*clp
;
6000 struct nfs4_ol_stateid
*stp
, *next
;
6002 list_for_each_entry_safe(stp
, next
, reaplist
, st_locks
) {
6003 list_del_init(&stp
->st_locks
);
6004 clp
= stp
->st_stid
.sc_client
;
6005 nfs4_put_stid(&stp
->st_stid
);
6011 nfsd_inject_forget_client_locks(struct sockaddr_storage
*addr
, size_t addr_size
)
6013 unsigned int count
= 0;
6014 struct nfs4_client
*clp
;
6015 struct nfsd_net
*nn
= net_generic(current
->nsproxy
->net_ns
,
6017 LIST_HEAD(reaplist
);
6019 if (!nfsd_netns_ready(nn
))
6022 spin_lock(&nn
->client_lock
);
6023 clp
= nfsd_find_client(addr
, addr_size
);
6025 count
= nfsd_collect_client_locks(clp
, &reaplist
, 0);
6026 spin_unlock(&nn
->client_lock
);
6027 nfsd_reap_locks(&reaplist
);
6032 nfsd_inject_forget_locks(u64 max
)
6035 struct nfs4_client
*clp
;
6036 struct nfsd_net
*nn
= net_generic(current
->nsproxy
->net_ns
,
6038 LIST_HEAD(reaplist
);
6040 if (!nfsd_netns_ready(nn
))
6043 spin_lock(&nn
->client_lock
);
6044 list_for_each_entry(clp
, &nn
->client_lru
, cl_lru
) {
6045 count
+= nfsd_collect_client_locks(clp
, &reaplist
, max
- count
);
6046 if (max
!= 0 && count
>= max
)
6049 spin_unlock(&nn
->client_lock
);
6050 nfsd_reap_locks(&reaplist
);
6055 nfsd_foreach_client_openowner(struct nfs4_client
*clp
, u64 max
,
6056 struct list_head
*collect
,
6057 void (*func
)(struct nfs4_openowner
*))
6059 struct nfs4_openowner
*oop
, *next
;
6060 struct nfsd_net
*nn
= net_generic(current
->nsproxy
->net_ns
,
6064 lockdep_assert_held(&nn
->client_lock
);
6066 spin_lock(&clp
->cl_lock
);
6067 list_for_each_entry_safe(oop
, next
, &clp
->cl_openowners
, oo_perclient
) {
6071 atomic_inc(&clp
->cl_refcount
);
6072 list_add(&oop
->oo_perclient
, collect
);
6077 * Despite the fact that these functions deal with
6078 * 64-bit integers for "count", we must ensure that
6079 * it doesn't blow up the clp->cl_refcount. Throw a
6080 * warning if we start to approach INT_MAX here.
6082 WARN_ON_ONCE(count
== (INT_MAX
/ 2));
6086 spin_unlock(&clp
->cl_lock
);
6092 nfsd_print_client_openowners(struct nfs4_client
*clp
)
6094 u64 count
= nfsd_foreach_client_openowner(clp
, 0, NULL
, NULL
);
6096 nfsd_print_count(clp
, count
, "openowners");
6101 nfsd_collect_client_openowners(struct nfs4_client
*clp
,
6102 struct list_head
*collect
, u64 max
)
6104 return nfsd_foreach_client_openowner(clp
, max
, collect
,
6105 unhash_openowner_locked
);
6109 nfsd_inject_print_openowners(void)
6111 struct nfs4_client
*clp
;
6113 struct nfsd_net
*nn
= net_generic(current
->nsproxy
->net_ns
,
6116 if (!nfsd_netns_ready(nn
))
6119 spin_lock(&nn
->client_lock
);
6120 list_for_each_entry(clp
, &nn
->client_lru
, cl_lru
)
6121 count
+= nfsd_print_client_openowners(clp
);
6122 spin_unlock(&nn
->client_lock
);
6128 nfsd_reap_openowners(struct list_head
*reaplist
)
6130 struct nfs4_client
*clp
;
6131 struct nfs4_openowner
*oop
, *next
;
6133 list_for_each_entry_safe(oop
, next
, reaplist
, oo_perclient
) {
6134 list_del_init(&oop
->oo_perclient
);
6135 clp
= oop
->oo_owner
.so_client
;
6136 release_openowner(oop
);
6142 nfsd_inject_forget_client_openowners(struct sockaddr_storage
*addr
,
6145 unsigned int count
= 0;
6146 struct nfs4_client
*clp
;
6147 struct nfsd_net
*nn
= net_generic(current
->nsproxy
->net_ns
,
6149 LIST_HEAD(reaplist
);
6151 if (!nfsd_netns_ready(nn
))
6154 spin_lock(&nn
->client_lock
);
6155 clp
= nfsd_find_client(addr
, addr_size
);
6157 count
= nfsd_collect_client_openowners(clp
, &reaplist
, 0);
6158 spin_unlock(&nn
->client_lock
);
6159 nfsd_reap_openowners(&reaplist
);
6164 nfsd_inject_forget_openowners(u64 max
)
6167 struct nfs4_client
*clp
;
6168 struct nfsd_net
*nn
= net_generic(current
->nsproxy
->net_ns
,
6170 LIST_HEAD(reaplist
);
6172 if (!nfsd_netns_ready(nn
))
6175 spin_lock(&nn
->client_lock
);
6176 list_for_each_entry(clp
, &nn
->client_lru
, cl_lru
) {
6177 count
+= nfsd_collect_client_openowners(clp
, &reaplist
,
6179 if (max
!= 0 && count
>= max
)
6182 spin_unlock(&nn
->client_lock
);
6183 nfsd_reap_openowners(&reaplist
);
6187 static u64
nfsd_find_all_delegations(struct nfs4_client
*clp
, u64 max
,
6188 struct list_head
*victims
)
6190 struct nfs4_delegation
*dp
, *next
;
6191 struct nfsd_net
*nn
= net_generic(current
->nsproxy
->net_ns
,
6195 lockdep_assert_held(&nn
->client_lock
);
6197 spin_lock(&state_lock
);
6198 list_for_each_entry_safe(dp
, next
, &clp
->cl_delegations
, dl_perclnt
) {
6201 * It's not safe to mess with delegations that have a
6202 * non-zero dl_time. They might have already been broken
6203 * and could be processed by the laundromat outside of
6204 * the state_lock. Just leave them be.
6206 if (dp
->dl_time
!= 0)
6209 atomic_inc(&clp
->cl_refcount
);
6210 unhash_delegation_locked(dp
);
6211 list_add(&dp
->dl_recall_lru
, victims
);
6215 * Despite the fact that these functions deal with
6216 * 64-bit integers for "count", we must ensure that
6217 * it doesn't blow up the clp->cl_refcount. Throw a
6218 * warning if we start to approach INT_MAX here.
6220 WARN_ON_ONCE(count
== (INT_MAX
/ 2));
6224 spin_unlock(&state_lock
);
6229 nfsd_print_client_delegations(struct nfs4_client
*clp
)
6231 u64 count
= nfsd_find_all_delegations(clp
, 0, NULL
);
6233 nfsd_print_count(clp
, count
, "delegations");
6238 nfsd_inject_print_delegations(void)
6240 struct nfs4_client
*clp
;
6242 struct nfsd_net
*nn
= net_generic(current
->nsproxy
->net_ns
,
6245 if (!nfsd_netns_ready(nn
))
6248 spin_lock(&nn
->client_lock
);
6249 list_for_each_entry(clp
, &nn
->client_lru
, cl_lru
)
6250 count
+= nfsd_print_client_delegations(clp
);
6251 spin_unlock(&nn
->client_lock
);
6257 nfsd_forget_delegations(struct list_head
*reaplist
)
6259 struct nfs4_client
*clp
;
6260 struct nfs4_delegation
*dp
, *next
;
6262 list_for_each_entry_safe(dp
, next
, reaplist
, dl_recall_lru
) {
6263 list_del_init(&dp
->dl_recall_lru
);
6264 clp
= dp
->dl_stid
.sc_client
;
6265 revoke_delegation(dp
);
6271 nfsd_inject_forget_client_delegations(struct sockaddr_storage
*addr
,
6275 struct nfs4_client
*clp
;
6276 struct nfsd_net
*nn
= net_generic(current
->nsproxy
->net_ns
,
6278 LIST_HEAD(reaplist
);
6280 if (!nfsd_netns_ready(nn
))
6283 spin_lock(&nn
->client_lock
);
6284 clp
= nfsd_find_client(addr
, addr_size
);
6286 count
= nfsd_find_all_delegations(clp
, 0, &reaplist
);
6287 spin_unlock(&nn
->client_lock
);
6289 nfsd_forget_delegations(&reaplist
);
6294 nfsd_inject_forget_delegations(u64 max
)
6297 struct nfs4_client
*clp
;
6298 struct nfsd_net
*nn
= net_generic(current
->nsproxy
->net_ns
,
6300 LIST_HEAD(reaplist
);
6302 if (!nfsd_netns_ready(nn
))
6305 spin_lock(&nn
->client_lock
);
6306 list_for_each_entry(clp
, &nn
->client_lru
, cl_lru
) {
6307 count
+= nfsd_find_all_delegations(clp
, max
- count
, &reaplist
);
6308 if (max
!= 0 && count
>= max
)
6311 spin_unlock(&nn
->client_lock
);
6312 nfsd_forget_delegations(&reaplist
);
6317 nfsd_recall_delegations(struct list_head
*reaplist
)
6319 struct nfs4_client
*clp
;
6320 struct nfs4_delegation
*dp
, *next
;
6322 list_for_each_entry_safe(dp
, next
, reaplist
, dl_recall_lru
) {
6323 list_del_init(&dp
->dl_recall_lru
);
6324 clp
= dp
->dl_stid
.sc_client
;
6326 * We skipped all entries that had a zero dl_time before,
6327 * so we can now reset the dl_time back to 0. If a delegation
6328 * break comes in now, then it won't make any difference since
6329 * we're recalling it either way.
6331 spin_lock(&state_lock
);
6333 spin_unlock(&state_lock
);
6334 nfsd_break_one_deleg(dp
);
6340 nfsd_inject_recall_client_delegations(struct sockaddr_storage
*addr
,
6344 struct nfs4_client
*clp
;
6345 struct nfsd_net
*nn
= net_generic(current
->nsproxy
->net_ns
,
6347 LIST_HEAD(reaplist
);
6349 if (!nfsd_netns_ready(nn
))
6352 spin_lock(&nn
->client_lock
);
6353 clp
= nfsd_find_client(addr
, addr_size
);
6355 count
= nfsd_find_all_delegations(clp
, 0, &reaplist
);
6356 spin_unlock(&nn
->client_lock
);
6358 nfsd_recall_delegations(&reaplist
);
6363 nfsd_inject_recall_delegations(u64 max
)
6366 struct nfs4_client
*clp
, *next
;
6367 struct nfsd_net
*nn
= net_generic(current
->nsproxy
->net_ns
,
6369 LIST_HEAD(reaplist
);
6371 if (!nfsd_netns_ready(nn
))
6374 spin_lock(&nn
->client_lock
);
6375 list_for_each_entry_safe(clp
, next
, &nn
->client_lru
, cl_lru
) {
6376 count
+= nfsd_find_all_delegations(clp
, max
- count
, &reaplist
);
6377 if (max
!= 0 && ++count
>= max
)
6380 spin_unlock(&nn
->client_lock
);
6381 nfsd_recall_delegations(&reaplist
);
6384 #endif /* CONFIG_NFSD_FAULT_INJECTION */
6387 * Since the lifetime of a delegation isn't limited to that of an open, a
6388 * client may quite reasonably hang on to a delegation as long as it has
6389 * the inode cached. This becomes an obvious problem the first time a
6390 * client's inode cache approaches the size of the server's total memory.
6392 * For now we avoid this problem by imposing a hard limit on the number
6393 * of delegations, which varies according to the server's memory size.
6396 set_max_delegations(void)
6399 * Allow at most 4 delegations per megabyte of RAM. Quick
6400 * estimates suggest that in the worst case (where every delegation
6401 * is for a different inode), a delegation could take about 1.5K,
6402 * giving a worst case usage of about 6% of memory.
6404 max_delegations
= nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT
);
6407 static int nfs4_state_create_net(struct net
*net
)
6409 struct nfsd_net
*nn
= net_generic(net
, nfsd_net_id
);
6412 nn
->conf_id_hashtbl
= kmalloc(sizeof(struct list_head
) *
6413 CLIENT_HASH_SIZE
, GFP_KERNEL
);
6414 if (!nn
->conf_id_hashtbl
)
6416 nn
->unconf_id_hashtbl
= kmalloc(sizeof(struct list_head
) *
6417 CLIENT_HASH_SIZE
, GFP_KERNEL
);
6418 if (!nn
->unconf_id_hashtbl
)
6420 nn
->sessionid_hashtbl
= kmalloc(sizeof(struct list_head
) *
6421 SESSION_HASH_SIZE
, GFP_KERNEL
);
6422 if (!nn
->sessionid_hashtbl
)
6425 for (i
= 0; i
< CLIENT_HASH_SIZE
; i
++) {
6426 INIT_LIST_HEAD(&nn
->conf_id_hashtbl
[i
]);
6427 INIT_LIST_HEAD(&nn
->unconf_id_hashtbl
[i
]);
6429 for (i
= 0; i
< SESSION_HASH_SIZE
; i
++)
6430 INIT_LIST_HEAD(&nn
->sessionid_hashtbl
[i
]);
6431 nn
->conf_name_tree
= RB_ROOT
;
6432 nn
->unconf_name_tree
= RB_ROOT
;
6433 INIT_LIST_HEAD(&nn
->client_lru
);
6434 INIT_LIST_HEAD(&nn
->close_lru
);
6435 INIT_LIST_HEAD(&nn
->del_recall_lru
);
6436 spin_lock_init(&nn
->client_lock
);
6438 INIT_DELAYED_WORK(&nn
->laundromat_work
, laundromat_main
);
6444 kfree(nn
->unconf_id_hashtbl
);
6446 kfree(nn
->conf_id_hashtbl
);
6452 nfs4_state_destroy_net(struct net
*net
)
6455 struct nfs4_client
*clp
= NULL
;
6456 struct nfsd_net
*nn
= net_generic(net
, nfsd_net_id
);
6458 for (i
= 0; i
< CLIENT_HASH_SIZE
; i
++) {
6459 while (!list_empty(&nn
->conf_id_hashtbl
[i
])) {
6460 clp
= list_entry(nn
->conf_id_hashtbl
[i
].next
, struct nfs4_client
, cl_idhash
);
6461 destroy_client(clp
);
6465 for (i
= 0; i
< CLIENT_HASH_SIZE
; i
++) {
6466 while (!list_empty(&nn
->unconf_id_hashtbl
[i
])) {
6467 clp
= list_entry(nn
->unconf_id_hashtbl
[i
].next
, struct nfs4_client
, cl_idhash
);
6468 destroy_client(clp
);
6472 kfree(nn
->sessionid_hashtbl
);
6473 kfree(nn
->unconf_id_hashtbl
);
6474 kfree(nn
->conf_id_hashtbl
);
6479 nfs4_state_start_net(struct net
*net
)
6481 struct nfsd_net
*nn
= net_generic(net
, nfsd_net_id
);
6484 ret
= nfs4_state_create_net(net
);
6487 nn
->boot_time
= get_seconds();
6488 nn
->grace_ended
= false;
6489 locks_start_grace(net
, &nn
->nfsd4_manager
);
6490 nfsd4_client_tracking_init(net
);
6491 printk(KERN_INFO
"NFSD: starting %ld-second grace period (net %p)\n",
6492 nn
->nfsd4_grace
, net
);
6493 queue_delayed_work(laundry_wq
, &nn
->laundromat_work
, nn
->nfsd4_grace
* HZ
);
6497 /* initialization to perform when the nfsd service is started: */
6500 nfs4_state_start(void)
6504 ret
= set_callback_cred();
6507 laundry_wq
= create_singlethread_workqueue("nfsd4");
6508 if (laundry_wq
== NULL
) {
6512 ret
= nfsd4_create_callback_queue();
6514 goto out_free_laundry
;
6516 set_max_delegations();
6521 destroy_workqueue(laundry_wq
);
6527 nfs4_state_shutdown_net(struct net
*net
)
6529 struct nfs4_delegation
*dp
= NULL
;
6530 struct list_head
*pos
, *next
, reaplist
;
6531 struct nfsd_net
*nn
= net_generic(net
, nfsd_net_id
);
6533 cancel_delayed_work_sync(&nn
->laundromat_work
);
6534 locks_end_grace(&nn
->nfsd4_manager
);
6536 INIT_LIST_HEAD(&reaplist
);
6537 spin_lock(&state_lock
);
6538 list_for_each_safe(pos
, next
, &nn
->del_recall_lru
) {
6539 dp
= list_entry (pos
, struct nfs4_delegation
, dl_recall_lru
);
6540 unhash_delegation_locked(dp
);
6541 list_add(&dp
->dl_recall_lru
, &reaplist
);
6543 spin_unlock(&state_lock
);
6544 list_for_each_safe(pos
, next
, &reaplist
) {
6545 dp
= list_entry (pos
, struct nfs4_delegation
, dl_recall_lru
);
6546 list_del_init(&dp
->dl_recall_lru
);
6547 nfs4_put_deleg_lease(dp
->dl_stid
.sc_file
);
6548 nfs4_put_stid(&dp
->dl_stid
);
6551 nfsd4_client_tracking_exit(net
);
6552 nfs4_state_destroy_net(net
);
6556 nfs4_state_shutdown(void)
6558 destroy_workqueue(laundry_wq
);
6559 nfsd4_destroy_callback_queue();
6563 get_stateid(struct nfsd4_compound_state
*cstate
, stateid_t
*stateid
)
6565 if (HAS_STATE_ID(cstate
, CURRENT_STATE_ID_FLAG
) && CURRENT_STATEID(stateid
))
6566 memcpy(stateid
, &cstate
->current_stateid
, sizeof(stateid_t
));
6570 put_stateid(struct nfsd4_compound_state
*cstate
, stateid_t
*stateid
)
6572 if (cstate
->minorversion
) {
6573 memcpy(&cstate
->current_stateid
, stateid
, sizeof(stateid_t
));
6574 SET_STATE_ID(cstate
, CURRENT_STATE_ID_FLAG
);
6579 clear_current_stateid(struct nfsd4_compound_state
*cstate
)
6581 CLEAR_STATE_ID(cstate
, CURRENT_STATE_ID_FLAG
);
6585 * functions to set current state id
6588 nfsd4_set_opendowngradestateid(struct nfsd4_compound_state
*cstate
, struct nfsd4_open_downgrade
*odp
)
6590 put_stateid(cstate
, &odp
->od_stateid
);
6594 nfsd4_set_openstateid(struct nfsd4_compound_state
*cstate
, struct nfsd4_open
*open
)
6596 put_stateid(cstate
, &open
->op_stateid
);
6600 nfsd4_set_closestateid(struct nfsd4_compound_state
*cstate
, struct nfsd4_close
*close
)
6602 put_stateid(cstate
, &close
->cl_stateid
);
6606 nfsd4_set_lockstateid(struct nfsd4_compound_state
*cstate
, struct nfsd4_lock
*lock
)
6608 put_stateid(cstate
, &lock
->lk_resp_stateid
);
6612 * functions to consume current state id
6616 nfsd4_get_opendowngradestateid(struct nfsd4_compound_state
*cstate
, struct nfsd4_open_downgrade
*odp
)
6618 get_stateid(cstate
, &odp
->od_stateid
);
6622 nfsd4_get_delegreturnstateid(struct nfsd4_compound_state
*cstate
, struct nfsd4_delegreturn
*drp
)
6624 get_stateid(cstate
, &drp
->dr_stateid
);
6628 nfsd4_get_freestateid(struct nfsd4_compound_state
*cstate
, struct nfsd4_free_stateid
*fsp
)
6630 get_stateid(cstate
, &fsp
->fr_stateid
);
6634 nfsd4_get_setattrstateid(struct nfsd4_compound_state
*cstate
, struct nfsd4_setattr
*setattr
)
6636 get_stateid(cstate
, &setattr
->sa_stateid
);
6640 nfsd4_get_closestateid(struct nfsd4_compound_state
*cstate
, struct nfsd4_close
*close
)
6642 get_stateid(cstate
, &close
->cl_stateid
);
6646 nfsd4_get_lockustateid(struct nfsd4_compound_state
*cstate
, struct nfsd4_locku
*locku
)
6648 get_stateid(cstate
, &locku
->lu_stateid
);
6652 nfsd4_get_readstateid(struct nfsd4_compound_state
*cstate
, struct nfsd4_read
*read
)
6654 get_stateid(cstate
, &read
->rd_stateid
);
6658 nfsd4_get_writestateid(struct nfsd4_compound_state
*cstate
, struct nfsd4_write
*write
)
6660 get_stateid(cstate
, &write
->wr_stateid
);