/*
 * Copyright (c) 2001 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Kendrick Smith <kmsmith@umich.edu>
 * Andy Adamson <kandros@umich.edu>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/ratelimit.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/addr.h>
#include <linux/jhash.h>
#include "current_stateid.h"

#define NFSDDBG_FACILITY                NFSDDBG_PROC
#define all_ones {{~0,~0},~0}
static const stateid_t one_stateid = {
	.si_generation = ~0,
	.si_opaque = all_ones,
};
static const stateid_t zero_stateid = {
	/* all fields zeroed */
};
static const stateid_t currentstateid = {
	.si_generation = 1,
};
static const stateid_t close_stateid = {
	.si_generation = 0xffffffffU,
};

static u64 current_sessionid = 1;

#define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
#define ONE_STATEID(stateid)  (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
#define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
#define CLOSE_STATEID(stateid)  (!memcmp((stateid), &close_stateid, sizeof(stateid_t)))
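/*
 * Note (illustrative, not from the original source): the all-zero and
 * all-ones values above correspond to the "special" stateids defined by
 * the NFSv4 protocol, which clients may present on READ/WRITE when they
 * hold no open or lock state; that is why requests are checked against
 * them with memcmp() rather than by an idr lookup.
 */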
/* forward declarations */
static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
static void nfs4_free_ol_stateid(struct nfs4_stid *stid);
void nfsd4_end_grace(struct nfsd_net *nn);

/*
 * Currently used for the del_recall_lru and file hash table.  In an
 * effort to decrease the scope of the client_mutex, this spinlock may
 * eventually cover more:
 */
static DEFINE_SPINLOCK(state_lock);

enum nfsd4_st_mutex_lock_subclass {
	OPEN_STATEID_MUTEX = 0,
	LOCK_STATEID_MUTEX = 1,
};

/*
 * A waitqueue for all in-progress 4.0 CLOSE operations that are waiting for
 * the refcount on the open stateid to drop.
 */
static DECLARE_WAIT_QUEUE_HEAD(close_wq);

static struct kmem_cache *client_slab;
static struct kmem_cache *openowner_slab;
static struct kmem_cache *lockowner_slab;
static struct kmem_cache *file_slab;
static struct kmem_cache *stateid_slab;
static struct kmem_cache *deleg_slab;
static struct kmem_cache *odstate_slab;

static void free_session(struct nfsd4_session *);

static const struct nfsd4_callback_ops nfsd4_cb_recall_ops;
static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops;
static bool is_session_dead(struct nfsd4_session *ses)
{
	return ses->se_flags & NFS4_SESSION_DEAD;
}

static __be32 mark_session_dead_locked(struct nfsd4_session *ses, int ref_held_by_me)
{
	if (atomic_read(&ses->se_ref) > ref_held_by_me)
		return nfserr_jukebox;
	ses->se_flags |= NFS4_SESSION_DEAD;
	return nfs_ok;
}

static bool is_client_expired(struct nfs4_client *clp)
{
	return clp->cl_time == 0;
}

static __be32 get_client_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	if (is_client_expired(clp))
		return nfserr_expired;
	atomic_inc(&clp->cl_refcount);
	return nfs_ok;
}
/* must be called under the client_lock */
static void
renew_client_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	if (is_client_expired(clp)) {
		printk("%s: client (clientid %08x/%08x) already expired\n",
			__func__,
			clp->cl_clientid.cl_boot,
			clp->cl_clientid.cl_id);
		return;
	}

	dprintk("renewing client (clientid %08x/%08x)\n",
			clp->cl_clientid.cl_boot,
			clp->cl_clientid.cl_id);
	list_move_tail(&clp->cl_lru, &nn->client_lru);
	clp->cl_time = get_seconds();
}

static void put_client_renew_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	if (!atomic_dec_and_test(&clp->cl_refcount))
		return;
	if (!is_client_expired(clp))
		renew_client_locked(clp);
}

static void put_client_renew(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	if (!atomic_dec_and_lock(&clp->cl_refcount, &nn->client_lock))
		return;
	if (!is_client_expired(clp))
		renew_client_locked(clp);
	spin_unlock(&nn->client_lock);
}
static __be32 nfsd4_get_session_locked(struct nfsd4_session *ses)
{
	__be32 status;

	if (is_session_dead(ses))
		return nfserr_badsession;
	status = get_client_locked(ses->se_client);
	if (status)
		return status;
	atomic_inc(&ses->se_ref);
	return nfs_ok;
}

static void nfsd4_put_session_locked(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	if (atomic_dec_and_test(&ses->se_ref) && is_session_dead(ses))
		free_session(ses);
	put_client_renew_locked(clp);
}

static void nfsd4_put_session(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	spin_lock(&nn->client_lock);
	nfsd4_put_session_locked(ses);
	spin_unlock(&nn->client_lock);
}
static struct nfsd4_blocked_lock *
find_blocked_lock(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
			struct nfsd_net *nn)
{
	struct nfsd4_blocked_lock *cur, *found = NULL;

	spin_lock(&nn->blocked_locks_lock);
	list_for_each_entry(cur, &lo->lo_blocked, nbl_list) {
		if (fh_match(fh, &cur->nbl_fh)) {
			list_del_init(&cur->nbl_list);
			list_del_init(&cur->nbl_lru);
			found = cur;
			break;
		}
	}
	spin_unlock(&nn->blocked_locks_lock);
	if (found)
		locks_delete_block(&found->nbl_lock);
	return found;
}

static struct nfsd4_blocked_lock *
find_or_allocate_block(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
			struct nfsd_net *nn)
{
	struct nfsd4_blocked_lock *nbl;

	nbl = find_blocked_lock(lo, fh, nn);
	if (!nbl) {
		nbl = kmalloc(sizeof(*nbl), GFP_KERNEL);
		if (nbl) {
			fh_copy_shallow(&nbl->nbl_fh, fh);
			locks_init_lock(&nbl->nbl_lock);
			nfsd4_init_cb(&nbl->nbl_cb, lo->lo_owner.so_client,
					&nfsd4_cb_notify_lock_ops,
					NFSPROC4_CLNT_CB_NOTIFY_LOCK);
		}
	}
	return nbl;
}

static void
free_blocked_lock(struct nfsd4_blocked_lock *nbl)
{
	locks_delete_block(&nbl->nbl_lock);
	locks_release_private(&nbl->nbl_lock);
	kfree(nbl);
}
static void
remove_blocked_locks(struct nfs4_lockowner *lo)
{
	struct nfs4_client *clp = lo->lo_owner.so_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
	struct nfsd4_blocked_lock *nbl;
	LIST_HEAD(reaplist);

	/* Dequeue all blocked locks */
	spin_lock(&nn->blocked_locks_lock);
	while (!list_empty(&lo->lo_blocked)) {
		nbl = list_first_entry(&lo->lo_blocked,
					struct nfsd4_blocked_lock,
					nbl_list);
		list_del_init(&nbl->nbl_list);
		list_move(&nbl->nbl_lru, &reaplist);
	}
	spin_unlock(&nn->blocked_locks_lock);

	while (!list_empty(&reaplist)) {
		nbl = list_first_entry(&reaplist, struct nfsd4_blocked_lock,
					nbl_lru);
		list_del_init(&nbl->nbl_lru);
		free_blocked_lock(nbl);
	}
}

static void
nfsd4_cb_notify_lock_prepare(struct nfsd4_callback *cb)
{
	struct nfsd4_blocked_lock *nbl = container_of(cb,
					struct nfsd4_blocked_lock, nbl_cb);

	locks_delete_block(&nbl->nbl_lock);
}

static int
nfsd4_cb_notify_lock_done(struct nfsd4_callback *cb, struct rpc_task *task)
{
	/*
	 * Since this is just an optimization, we don't try very hard if it
	 * turns out not to succeed. We'll requeue it on NFS4ERR_DELAY, and
	 * just quit trying on anything else.
	 */
	switch (task->tk_status) {
	case -NFS4ERR_DELAY:
		rpc_delay(task, 1 * HZ);
		return 0;
	default:
		return 1;
	}
}

static void
nfsd4_cb_notify_lock_release(struct nfsd4_callback *cb)
{
	struct nfsd4_blocked_lock *nbl = container_of(cb,
					struct nfsd4_blocked_lock, nbl_cb);

	free_blocked_lock(nbl);
}

static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops = {
	.prepare	= nfsd4_cb_notify_lock_prepare,
	.done		= nfsd4_cb_notify_lock_done,
	.release	= nfsd4_cb_notify_lock_release,
};
static inline struct nfs4_stateowner *
nfs4_get_stateowner(struct nfs4_stateowner *sop)
{
	atomic_inc(&sop->so_count);
	return sop;
}

static bool
same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner)
{
	return (sop->so_owner.len == owner->len) &&
		0 == memcmp(sop->so_owner.data, owner->data, owner->len);
}

static struct nfs4_openowner *
find_openstateowner_str_locked(unsigned int hashval, struct nfsd4_open *open,
			struct nfs4_client *clp)
{
	struct nfs4_stateowner *so;

	lockdep_assert_held(&clp->cl_lock);

	list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[hashval],
			    so_strhash) {
		if (!so->so_is_open_owner)
			continue;
		if (same_owner_str(so, &open->op_owner))
			return openowner(nfs4_get_stateowner(so));
	}
	return NULL;
}

static struct nfs4_openowner *
find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open,
			struct nfs4_client *clp)
{
	struct nfs4_openowner *oo;

	spin_lock(&clp->cl_lock);
	oo = find_openstateowner_str_locked(hashval, open, clp);
	spin_unlock(&clp->cl_lock);
	return oo;
}
static unsigned int
opaque_hashval(const void *ptr, int nbytes)
{
	unsigned char *cptr = (unsigned char *) ptr;
	unsigned int x = 0;

	while (nbytes--) {
		x *= 37;
		x += *cptr++;
	}
	return x;
}

static void nfsd4_free_file_rcu(struct rcu_head *rcu)
{
	struct nfs4_file *fp = container_of(rcu, struct nfs4_file, fi_rcu);

	kmem_cache_free(file_slab, fp);
}

void
put_nfs4_file(struct nfs4_file *fi)
{
	might_lock(&state_lock);

	if (refcount_dec_and_lock(&fi->fi_ref, &state_lock)) {
		hlist_del_rcu(&fi->fi_hash);
		spin_unlock(&state_lock);
		WARN_ON_ONCE(!list_empty(&fi->fi_clnt_odstate));
		WARN_ON_ONCE(!list_empty(&fi->fi_delegations));
		call_rcu(&fi->fi_rcu, nfsd4_free_file_rcu);
	}
}
static struct file *
__nfs4_get_fd(struct nfs4_file *f, int oflag)
{
	if (f->fi_fds[oflag])
		return get_file(f->fi_fds[oflag]);
	return NULL;
}

static struct file *
find_writeable_file_locked(struct nfs4_file *f)
{
	struct file *ret;

	lockdep_assert_held(&f->fi_lock);

	ret = __nfs4_get_fd(f, O_WRONLY);
	if (!ret)
		ret = __nfs4_get_fd(f, O_RDWR);
	return ret;
}

static struct file *
find_writeable_file(struct nfs4_file *f)
{
	struct file *ret;

	spin_lock(&f->fi_lock);
	ret = find_writeable_file_locked(f);
	spin_unlock(&f->fi_lock);

	return ret;
}

static struct file *find_readable_file_locked(struct nfs4_file *f)
{
	struct file *ret;

	lockdep_assert_held(&f->fi_lock);

	ret = __nfs4_get_fd(f, O_RDONLY);
	if (!ret)
		ret = __nfs4_get_fd(f, O_RDWR);
	return ret;
}

static struct file *
find_readable_file(struct nfs4_file *f)
{
	struct file *ret;

	spin_lock(&f->fi_lock);
	ret = find_readable_file_locked(f);
	spin_unlock(&f->fi_lock);

	return ret;
}

struct file *
find_any_file(struct nfs4_file *f)
{
	struct file *ret;

	spin_lock(&f->fi_lock);
	ret = __nfs4_get_fd(f, O_RDWR);
	if (!ret) {
		ret = __nfs4_get_fd(f, O_WRONLY);
		if (!ret)
			ret = __nfs4_get_fd(f, O_RDONLY);
	}
	spin_unlock(&f->fi_lock);
	return ret;
}

static atomic_long_t num_delegations;
unsigned long max_delegations;
/*
 * Open owner state (share locks)
 */

/* hash tables for lock and open owners */
#define OWNER_HASH_BITS              8
#define OWNER_HASH_SIZE             (1 << OWNER_HASH_BITS)
#define OWNER_HASH_MASK             (OWNER_HASH_SIZE - 1)

static unsigned int ownerstr_hashval(struct xdr_netobj *ownername)
{
	unsigned int ret;

	ret = opaque_hashval(ownername->data, ownername->len);
	return ret & OWNER_HASH_MASK;
}

/* hash table for nfs4_file */
#define FILE_HASH_BITS                   8
#define FILE_HASH_SIZE                  (1 << FILE_HASH_BITS)

static unsigned int nfsd_fh_hashval(struct knfsd_fh *fh)
{
	return jhash2(fh->fh_base.fh_pad, XDR_QUADLEN(fh->fh_size), 0);
}

static unsigned int file_hashval(struct knfsd_fh *fh)
{
	return nfsd_fh_hashval(fh) & (FILE_HASH_SIZE - 1);
}

static struct hlist_head file_hashtbl[FILE_HASH_SIZE];
static void
__nfs4_file_get_access(struct nfs4_file *fp, u32 access)
{
	lockdep_assert_held(&fp->fi_lock);

	if (access & NFS4_SHARE_ACCESS_WRITE)
		atomic_inc(&fp->fi_access[O_WRONLY]);
	if (access & NFS4_SHARE_ACCESS_READ)
		atomic_inc(&fp->fi_access[O_RDONLY]);
}

static __be32
nfs4_file_get_access(struct nfs4_file *fp, u32 access)
{
	lockdep_assert_held(&fp->fi_lock);

	/* Does this access mode make sense? */
	if (access & ~NFS4_SHARE_ACCESS_BOTH)
		return nfserr_inval;

	/* Does it conflict with a deny mode already set? */
	if ((access & fp->fi_share_deny) != 0)
		return nfserr_share_denied;

	__nfs4_file_get_access(fp, access);
	return nfs_ok;
}

static __be32 nfs4_file_check_deny(struct nfs4_file *fp, u32 deny)
{
	/* Common case is that there is no deny mode. */
	if (deny) {
		/* Does this deny mode make sense? */
		if (deny & ~NFS4_SHARE_DENY_BOTH)
			return nfserr_inval;

		if ((deny & NFS4_SHARE_DENY_READ) &&
			atomic_read(&fp->fi_access[O_RDONLY]))
			return nfserr_share_denied;

		if ((deny & NFS4_SHARE_DENY_WRITE) &&
			atomic_read(&fp->fi_access[O_WRONLY]))
			return nfserr_share_denied;
	}
	return nfs_ok;
}

static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
{
	might_lock(&fp->fi_lock);

	if (atomic_dec_and_lock(&fp->fi_access[oflag], &fp->fi_lock)) {
		struct file *f1 = NULL;
		struct file *f2 = NULL;

		swap(f1, fp->fi_fds[oflag]);
		if (atomic_read(&fp->fi_access[1 - oflag]) == 0)
			swap(f2, fp->fi_fds[O_RDWR]);
		spin_unlock(&fp->fi_lock);
		if (f1)
			fput(f1);
		if (f2)
			fput(f2);
	}
}

static void nfs4_file_put_access(struct nfs4_file *fp, u32 access)
{
	WARN_ON_ONCE(access & ~NFS4_SHARE_ACCESS_BOTH);

	if (access & NFS4_SHARE_ACCESS_WRITE)
		__nfs4_file_put_access(fp, O_WRONLY);
	if (access & NFS4_SHARE_ACCESS_READ)
		__nfs4_file_put_access(fp, O_RDONLY);
}
/*
 * Allocate a new open/delegation state counter. This is needed for
 * pNFS for proper return on close semantics.
 *
 * Note that we only allocate it for pNFS-enabled exports, otherwise
 * all pointers to struct nfs4_clnt_odstate are always NULL.
 */
static struct nfs4_clnt_odstate *
alloc_clnt_odstate(struct nfs4_client *clp)
{
	struct nfs4_clnt_odstate *co;

	co = kmem_cache_zalloc(odstate_slab, GFP_KERNEL);
	if (co) {
		co->co_client = clp;
		refcount_set(&co->co_odcount, 1);
	}
	return co;
}

static void
hash_clnt_odstate_locked(struct nfs4_clnt_odstate *co)
{
	struct nfs4_file *fp = co->co_file;

	lockdep_assert_held(&fp->fi_lock);
	list_add(&co->co_perfile, &fp->fi_clnt_odstate);
}

static void
get_clnt_odstate(struct nfs4_clnt_odstate *co)
{
	if (co)
		refcount_inc(&co->co_odcount);
}

static void
put_clnt_odstate(struct nfs4_clnt_odstate *co)
{
	struct nfs4_file *fp;

	if (!co)
		return;

	fp = co->co_file;
	if (refcount_dec_and_lock(&co->co_odcount, &fp->fi_lock)) {
		list_del(&co->co_perfile);
		spin_unlock(&fp->fi_lock);

		nfsd4_return_all_file_layouts(co->co_client, fp);
		kmem_cache_free(odstate_slab, co);
	}
}

static struct nfs4_clnt_odstate *
find_or_hash_clnt_odstate(struct nfs4_file *fp, struct nfs4_clnt_odstate *new)
{
	struct nfs4_clnt_odstate *co;
	struct nfs4_client *cl;

	if (!new)
		return NULL;

	cl = new->co_client;

	spin_lock(&fp->fi_lock);
	list_for_each_entry(co, &fp->fi_clnt_odstate, co_perfile) {
		if (co->co_client == cl) {
			get_clnt_odstate(co);
			goto out;
		}
	}
	co = new;
	co->co_file = fp;
	hash_clnt_odstate_locked(new);
out:
	spin_unlock(&fp->fi_lock);
	return co;
}
struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
				  void (*sc_free)(struct nfs4_stid *))
{
	struct nfs4_stid *stid;
	int new_id;

	stid = kmem_cache_zalloc(slab, GFP_KERNEL);
	if (!stid)
		return NULL;

	idr_preload(GFP_KERNEL);
	spin_lock(&cl->cl_lock);
	new_id = idr_alloc_cyclic(&cl->cl_stateids, stid, 0, 0, GFP_NOWAIT);
	spin_unlock(&cl->cl_lock);
	idr_preload_end();
	if (new_id < 0)
		goto out_free;

	stid->sc_free = sc_free;
	stid->sc_client = cl;
	stid->sc_stateid.si_opaque.so_id = new_id;
	stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
	/* Will be incremented before return to client: */
	refcount_set(&stid->sc_count, 1);
	spin_lock_init(&stid->sc_lock);

	/*
	 * It shouldn't be a problem to reuse an opaque stateid value.
	 * I don't think it is for 4.1.  But with 4.0 I worry that, for
	 * example, a stray write retransmission could be accepted by
	 * the server when it should have been rejected.  Therefore,
	 * adopt a trick from the sctp code to attempt to maximize the
	 * amount of time until an id is reused, by ensuring they always
	 * "increase" (mod INT_MAX):
	 */
	return stid;
out_free:
	kmem_cache_free(slab, stid);
	return NULL;
}
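/*
 * Illustrative note (not part of the original source): the "always
 * increase (mod INT_MAX)" behaviour described above is what
 * idr_alloc_cyclic() provides here -- each allocation starts searching
 * after the most recently assigned id, so ids only wrap once the id
 * space has been exhausted.
 */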
/*
 * Create a unique stateid_t to represent each COPY.
 */
int nfs4_init_cp_state(struct nfsd_net *nn, struct nfsd4_copy *copy)
{
	int new_id;

	idr_preload(GFP_KERNEL);
	spin_lock(&nn->s2s_cp_lock);
	new_id = idr_alloc_cyclic(&nn->s2s_cp_stateids, copy, 0, 0, GFP_NOWAIT);
	spin_unlock(&nn->s2s_cp_lock);
	idr_preload_end();
	if (new_id < 0)
		return 0;
	copy->cp_stateid.si_opaque.so_id = new_id;
	copy->cp_stateid.si_opaque.so_clid.cl_boot = nn->boot_time;
	copy->cp_stateid.si_opaque.so_clid.cl_id = nn->s2s_cp_cl_id;
	return 1;
}

void nfs4_free_cp_state(struct nfsd4_copy *copy)
{
	struct nfsd_net *nn;

	nn = net_generic(copy->cp_clp->net, nfsd_net_id);
	spin_lock(&nn->s2s_cp_lock);
	idr_remove(&nn->s2s_cp_stateids, copy->cp_stateid.si_opaque.so_id);
	spin_unlock(&nn->s2s_cp_lock);
}

static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp)
{
	struct nfs4_stid *stid;

	stid = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_ol_stateid);
	if (!stid)
		return NULL;

	return openlockstateid(stid);
}

static void nfs4_free_deleg(struct nfs4_stid *stid)
{
	kmem_cache_free(deleg_slab, stid);
	atomic_long_dec(&num_delegations);
}
/*
 * When we recall a delegation, we should be careful not to hand it
 * out again straight away.
 * To ensure this we keep a pair of bloom filters ('new' and 'old')
 * in which the filehandles of recalled delegations are "stored".
 * If a filehandle appear in either filter, a delegation is blocked.
 * When a delegation is recalled, the filehandle is stored in the "new"
 * filter.
 * Every 30 seconds we swap the filters and clear the "new" one,
 * unless both are empty of course.
 *
 * Each filter is 256 bits.  We hash the filehandle to 32bit and use the
 * low 3 bytes as hash-table indices.
 *
 * 'blocked_delegations_lock', which is always taken in block_delegations(),
 * is used to manage concurrent access.  Testing does not need the lock
 * except when swapping the two filters.
 */
static DEFINE_SPINLOCK(blocked_delegations_lock);
static struct bloom_pair {
	int	entries, old_entries;
	time_t	swap_time;
	int	new; /* index into 'set' */
	DECLARE_BITMAP(set[2], 256);
} blocked_delegations;
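/*
 * For example (illustration only): a filehandle hashing to 0x00c0ffee is
 * recorded by setting bits 0xee, 0xff and 0xc0 of the active filter, and
 * a delegation is considered blocked only when all three of those bits
 * are set in the same filter.
 */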
static int delegation_blocked(struct knfsd_fh *fh)
{
	u32 hash;
	struct bloom_pair *bd = &blocked_delegations;

	if (bd->entries == 0)
		return 0;
	if (seconds_since_boot() - bd->swap_time > 30) {
		spin_lock(&blocked_delegations_lock);
		if (seconds_since_boot() - bd->swap_time > 30) {
			bd->entries -= bd->old_entries;
			bd->old_entries = bd->entries;
			memset(bd->set[bd->new], 0,
			       sizeof(bd->set[0]));
			bd->new = 1 - bd->new;
			bd->swap_time = seconds_since_boot();
		}
		spin_unlock(&blocked_delegations_lock);
	}
	hash = jhash(&fh->fh_base, fh->fh_size, 0);
	if (test_bit(hash&255, bd->set[0]) &&
	    test_bit((hash>>8)&255, bd->set[0]) &&
	    test_bit((hash>>16)&255, bd->set[0]))
		return 1;

	if (test_bit(hash&255, bd->set[1]) &&
	    test_bit((hash>>8)&255, bd->set[1]) &&
	    test_bit((hash>>16)&255, bd->set[1]))
		return 1;

	return 0;
}

static void block_delegations(struct knfsd_fh *fh)
{
	u32 hash;
	struct bloom_pair *bd = &blocked_delegations;

	hash = jhash(&fh->fh_base, fh->fh_size, 0);

	spin_lock(&blocked_delegations_lock);
	__set_bit(hash&255, bd->set[bd->new]);
	__set_bit((hash>>8)&255, bd->set[bd->new]);
	__set_bit((hash>>16)&255, bd->set[bd->new]);
	if (bd->entries == 0)
		bd->swap_time = seconds_since_boot();
	bd->entries += 1;
	spin_unlock(&blocked_delegations_lock);
}
static struct nfs4_delegation *
alloc_init_deleg(struct nfs4_client *clp, struct nfs4_file *fp,
		 struct svc_fh *current_fh,
		 struct nfs4_clnt_odstate *odstate)
{
	struct nfs4_delegation *dp;
	long n;

	dprintk("NFSD alloc_init_deleg\n");
	n = atomic_long_inc_return(&num_delegations);
	if (n < 0 || n > max_delegations)
		goto out_dec;
	if (delegation_blocked(&current_fh->fh_handle))
		goto out_dec;
	dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab, nfs4_free_deleg));
	if (dp == NULL)
		goto out_dec;

	/*
	 * delegation seqid's are never incremented.  The 4.1 special
	 * meaning of seqid 0 isn't meaningful, really, but let's avoid
	 * 0 anyway just for consistency and use 1:
	 */
	dp->dl_stid.sc_stateid.si_generation = 1;
	INIT_LIST_HEAD(&dp->dl_perfile);
	INIT_LIST_HEAD(&dp->dl_perclnt);
	INIT_LIST_HEAD(&dp->dl_recall_lru);
	dp->dl_clnt_odstate = odstate;
	get_clnt_odstate(odstate);
	dp->dl_type = NFS4_OPEN_DELEGATE_READ;

	nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client,
		      &nfsd4_cb_recall_ops, NFSPROC4_CLNT_CB_RECALL);

	dp->dl_stid.sc_file = fp;
	return dp;
out_dec:
	atomic_long_dec(&num_delegations);
	return NULL;
}

void
nfs4_put_stid(struct nfs4_stid *s)
{
	struct nfs4_file *fp = s->sc_file;
	struct nfs4_client *clp = s->sc_client;

	might_lock(&clp->cl_lock);

	if (!refcount_dec_and_lock(&s->sc_count, &clp->cl_lock)) {
		wake_up_all(&close_wq);
		return;
	}
	idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
	spin_unlock(&clp->cl_lock);
	s->sc_free(s);
	if (fp)
		put_nfs4_file(fp);
}
void
nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid)
{
	stateid_t *src = &stid->sc_stateid;

	spin_lock(&stid->sc_lock);
	if (unlikely(++src->si_generation == 0))
		src->si_generation = 1;
	memcpy(dst, src, sizeof(*dst));
	spin_unlock(&stid->sc_lock);
}

static void put_deleg_file(struct nfs4_file *fp)
{
	struct file *filp = NULL;

	spin_lock(&fp->fi_lock);
	if (--fp->fi_delegees == 0)
		swap(filp, fp->fi_deleg_file);
	spin_unlock(&fp->fi_lock);

	if (filp)
		fput(filp);
}

static void nfs4_unlock_deleg_lease(struct nfs4_delegation *dp)
{
	struct nfs4_file *fp = dp->dl_stid.sc_file;
	struct file *filp = fp->fi_deleg_file;

	WARN_ON_ONCE(!fp->fi_delegees);

	vfs_setlease(filp, F_UNLCK, NULL, (void **)&dp);
	put_deleg_file(fp);
}

static void destroy_unhashed_deleg(struct nfs4_delegation *dp)
{
	put_clnt_odstate(dp->dl_clnt_odstate);
	nfs4_unlock_deleg_lease(dp);
	nfs4_put_stid(&dp->dl_stid);
}

void nfs4_unhash_stid(struct nfs4_stid *s)
{
	s->sc_type = 0;
}
/**
 * nfs4_delegation_exists - Discover if this delegation already exists
 * @clp:     a pointer to the nfs4_client we're granting a delegation to
 * @fp:      a pointer to the nfs4_file we're granting a delegation on
 *
 * Return:
 *      On success: true iff an existing delegation is found
 */
static bool
nfs4_delegation_exists(struct nfs4_client *clp, struct nfs4_file *fp)
{
	struct nfs4_delegation *searchdp = NULL;
	struct nfs4_client *searchclp = NULL;

	lockdep_assert_held(&state_lock);
	lockdep_assert_held(&fp->fi_lock);

	list_for_each_entry(searchdp, &fp->fi_delegations, dl_perfile) {
		searchclp = searchdp->dl_stid.sc_client;
		if (clp == searchclp) {
			return true;
		}
	}
	return false;
}

/**
 * hash_delegation_locked - Add a delegation to the appropriate lists
 * @dp:     a pointer to the nfs4_delegation we are adding.
 * @fp:     a pointer to the nfs4_file we're granting a delegation on
 *
 * Return:
 *      On success: NULL if the delegation was successfully hashed.
 *
 *      On error: -EAGAIN if one was previously granted to this
 *                 nfs4_client for this nfs4_file. Delegation is not hashed.
 */
static int
hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
{
	struct nfs4_client *clp = dp->dl_stid.sc_client;

	lockdep_assert_held(&state_lock);
	lockdep_assert_held(&fp->fi_lock);

	if (nfs4_delegation_exists(clp, fp))
		return -EAGAIN;
	refcount_inc(&dp->dl_stid.sc_count);
	dp->dl_stid.sc_type = NFS4_DELEG_STID;
	list_add(&dp->dl_perfile, &fp->fi_delegations);
	list_add(&dp->dl_perclnt, &clp->cl_delegations);
	return 0;
}

static bool
unhash_delegation_locked(struct nfs4_delegation *dp)
{
	struct nfs4_file *fp = dp->dl_stid.sc_file;

	lockdep_assert_held(&state_lock);

	if (list_empty(&dp->dl_perfile))
		return false;

	dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID;
	/* Ensure that deleg break won't try to requeue it */
	++dp->dl_time;
	spin_lock(&fp->fi_lock);
	list_del_init(&dp->dl_perclnt);
	list_del_init(&dp->dl_recall_lru);
	list_del_init(&dp->dl_perfile);
	spin_unlock(&fp->fi_lock);
	return true;
}

static void destroy_delegation(struct nfs4_delegation *dp)
{
	bool unhashed;

	spin_lock(&state_lock);
	unhashed = unhash_delegation_locked(dp);
	spin_unlock(&state_lock);
	if (unhashed)
		destroy_unhashed_deleg(dp);
}

static void revoke_delegation(struct nfs4_delegation *dp)
{
	struct nfs4_client *clp = dp->dl_stid.sc_client;

	WARN_ON(!list_empty(&dp->dl_recall_lru));

	if (clp->cl_minorversion) {
		dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID;
		refcount_inc(&dp->dl_stid.sc_count);
		spin_lock(&clp->cl_lock);
		list_add(&dp->dl_recall_lru, &clp->cl_revoked);
		spin_unlock(&clp->cl_lock);
	}
	destroy_unhashed_deleg(dp);
}

static unsigned int clientid_hashval(u32 id)
{
	return id & CLIENT_HASH_MASK;
}

static unsigned int clientstr_hashval(struct xdr_netobj name)
{
	return opaque_hashval(name.data, 8) & CLIENT_HASH_MASK;
}
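/*
 * Note (illustrative, not in the original source): only the first 8 bytes
 * of the client-supplied name are hashed here; the hash is used purely for
 * bucket selection, and full names are still compared byte-for-byte (see
 * compare_blob()) so collisions on the hash are harmless.
 */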
/*
 * We store the NONE, READ, WRITE, and BOTH bits separately in the
 * st_{access,deny}_bmap field of the stateid, in order to track not
 * only what share bits are currently in force, but also what
 * combinations of share bits previous opens have used.  This allows us
 * to enforce the recommendation of rfc 3530 14.2.19 that the server
 * return an error if the client attempt to downgrade to a combination
 * of share bits not explicable by closing some of its previous opens.
 *
 * XXX: This enforcement is actually incomplete, since we don't keep
 * track of access/deny bit combinations; so, e.g., we allow:
 *
 *	OPEN allow read, deny write
 *	OPEN allow both, deny none
 *	DOWNGRADE allow read, deny none
 *
 * which we should reject.
 */
static unsigned int
bmap_to_share_mode(unsigned long bmap) {
	int i;
	unsigned int access = 0;

	for (i = 1; i < 4; i++) {
		if (test_bit(i, &bmap))
			access |= i;
	}
	return access;
}
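/*
 * For example (illustration only): with NFS4_SHARE_ACCESS_READ = 1,
 * _WRITE = 2 and _BOTH = 3, a bitmap with bits 1 and 3 set (opens for
 * READ and for BOTH were seen) yields 1 | 3 == NFS4_SHARE_ACCESS_BOTH.
 */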
/* set share access for a given stateid */
static inline void
set_access(u32 access, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << access;

	WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
	stp->st_access_bmap |= mask;
}

/* clear share access for a given stateid */
static inline void
clear_access(u32 access, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << access;

	WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
	stp->st_access_bmap &= ~mask;
}

/* test whether a given stateid has access */
static inline bool
test_access(u32 access, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << access;

	return (bool)(stp->st_access_bmap & mask);
}

/* set share deny for a given stateid */
static inline void
set_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << deny;

	WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
	stp->st_deny_bmap |= mask;
}

/* clear share deny for a given stateid */
static inline void
clear_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << deny;

	WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
	stp->st_deny_bmap &= ~mask;
}

/* test whether a given stateid is denying specific access */
static inline bool
test_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << deny;

	return (bool)(stp->st_deny_bmap & mask);
}

static int nfs4_access_to_omode(u32 access)
{
	switch (access & NFS4_SHARE_ACCESS_BOTH) {
	case NFS4_SHARE_ACCESS_READ:
		return O_RDONLY;
	case NFS4_SHARE_ACCESS_WRITE:
		return O_WRONLY;
	case NFS4_SHARE_ACCESS_BOTH:
		return O_RDWR;
	}
	WARN_ON_ONCE(1);
	return O_RDONLY;
}
/*
 * A stateid that had a deny mode associated with it is being released
 * or downgraded. Recalculate the deny mode on the file.
 */
static void
recalculate_deny_mode(struct nfs4_file *fp)
{
	struct nfs4_ol_stateid *stp;

	spin_lock(&fp->fi_lock);
	fp->fi_share_deny = 0;
	list_for_each_entry(stp, &fp->fi_stateids, st_perfile)
		fp->fi_share_deny |= bmap_to_share_mode(stp->st_deny_bmap);
	spin_unlock(&fp->fi_lock);
}

static void
reset_union_bmap_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	int i;
	bool change = false;

	for (i = 1; i < 4; i++) {
		if ((i & deny) != i) {
			change = true;
			clear_deny(i, stp);
		}
	}

	/* Recalculate per-file deny mode if there was a change */
	if (change)
		recalculate_deny_mode(stp->st_stid.sc_file);
}

/* release all access and file references for a given stateid */
static void
release_all_access(struct nfs4_ol_stateid *stp)
{
	int i;
	struct nfs4_file *fp = stp->st_stid.sc_file;

	if (fp && stp->st_deny_bmap != 0)
		recalculate_deny_mode(fp);

	for (i = 1; i < 4; i++) {
		if (test_access(i, stp))
			nfs4_file_put_access(stp->st_stid.sc_file, i);
		clear_access(i, stp);
	}
}

static inline void nfs4_free_stateowner(struct nfs4_stateowner *sop)
{
	kfree(sop->so_owner.data);
	sop->so_ops->so_free(sop);
}

static void nfs4_put_stateowner(struct nfs4_stateowner *sop)
{
	struct nfs4_client *clp = sop->so_client;

	might_lock(&clp->cl_lock);

	if (!atomic_dec_and_lock(&sop->so_count, &clp->cl_lock))
		return;
	sop->so_ops->so_unhash(sop);
	spin_unlock(&clp->cl_lock);
	nfs4_free_stateowner(sop);
}
static bool unhash_ol_stateid(struct nfs4_ol_stateid *stp)
{
	struct nfs4_file *fp = stp->st_stid.sc_file;

	lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock);

	if (list_empty(&stp->st_perfile))
		return false;

	spin_lock(&fp->fi_lock);
	list_del_init(&stp->st_perfile);
	spin_unlock(&fp->fi_lock);
	list_del(&stp->st_perstateowner);
	return true;
}

static void nfs4_free_ol_stateid(struct nfs4_stid *stid)
{
	struct nfs4_ol_stateid *stp = openlockstateid(stid);

	put_clnt_odstate(stp->st_clnt_odstate);
	release_all_access(stp);
	if (stp->st_stateowner)
		nfs4_put_stateowner(stp->st_stateowner);
	kmem_cache_free(stateid_slab, stid);
}

static void nfs4_free_lock_stateid(struct nfs4_stid *stid)
{
	struct nfs4_ol_stateid *stp = openlockstateid(stid);
	struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
	struct file *file;

	file = find_any_file(stp->st_stid.sc_file);
	if (file)
		filp_close(file, (fl_owner_t)lo);
	nfs4_free_ol_stateid(stid);
}

/*
 * Put the persistent reference to an already unhashed generic stateid, while
 * holding the cl_lock. If it's the last reference, then put it onto the
 * reaplist for later destruction.
 */
static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,
				       struct list_head *reaplist)
{
	struct nfs4_stid *s = &stp->st_stid;
	struct nfs4_client *clp = s->sc_client;

	lockdep_assert_held(&clp->cl_lock);

	WARN_ON_ONCE(!list_empty(&stp->st_locks));

	if (!refcount_dec_and_test(&s->sc_count)) {
		wake_up_all(&close_wq);
		return;
	}

	idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
	list_add(&stp->st_locks, reaplist);
}
static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
{
	lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);

	list_del_init(&stp->st_locks);
	nfs4_unhash_stid(&stp->st_stid);
	return unhash_ol_stateid(stp);
}

static void release_lock_stateid(struct nfs4_ol_stateid *stp)
{
	struct nfs4_client *clp = stp->st_stid.sc_client;
	bool unhashed;

	spin_lock(&clp->cl_lock);
	unhashed = unhash_lock_stateid(stp);
	spin_unlock(&clp->cl_lock);
	if (unhashed)
		nfs4_put_stid(&stp->st_stid);
}

static void unhash_lockowner_locked(struct nfs4_lockowner *lo)
{
	struct nfs4_client *clp = lo->lo_owner.so_client;

	lockdep_assert_held(&clp->cl_lock);

	list_del_init(&lo->lo_owner.so_strhash);
}

/*
 * Free a list of generic stateids that were collected earlier after being
 * fully unhashed.
 */
static void
free_ol_stateid_reaplist(struct list_head *reaplist)
{
	struct nfs4_ol_stateid *stp;
	struct nfs4_file *fp;

	might_sleep();

	while (!list_empty(reaplist)) {
		stp = list_first_entry(reaplist, struct nfs4_ol_stateid,
				       st_locks);
		list_del(&stp->st_locks);
		fp = stp->st_stid.sc_file;
		stp->st_stid.sc_free(&stp->st_stid);
		if (fp)
			put_nfs4_file(fp);
	}
}

static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp,
				       struct list_head *reaplist)
{
	struct nfs4_ol_stateid *stp;

	lockdep_assert_held(&open_stp->st_stid.sc_client->cl_lock);

	while (!list_empty(&open_stp->st_locks)) {
		stp = list_entry(open_stp->st_locks.next,
				struct nfs4_ol_stateid, st_locks);
		WARN_ON(!unhash_lock_stateid(stp));
		put_ol_stateid_locked(stp, reaplist);
	}
}

static bool unhash_open_stateid(struct nfs4_ol_stateid *stp,
				struct list_head *reaplist)
{
	bool unhashed;

	lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);

	unhashed = unhash_ol_stateid(stp);
	release_open_stateid_locks(stp, reaplist);
	return unhashed;
}

static void release_open_stateid(struct nfs4_ol_stateid *stp)
{
	LIST_HEAD(reaplist);

	spin_lock(&stp->st_stid.sc_client->cl_lock);
	if (unhash_open_stateid(stp, &reaplist))
		put_ol_stateid_locked(stp, &reaplist);
	spin_unlock(&stp->st_stid.sc_client->cl_lock);
	free_ol_stateid_reaplist(&reaplist);
}
static void unhash_openowner_locked(struct nfs4_openowner *oo)
{
	struct nfs4_client *clp = oo->oo_owner.so_client;

	lockdep_assert_held(&clp->cl_lock);

	list_del_init(&oo->oo_owner.so_strhash);
	list_del_init(&oo->oo_perclient);
}

static void release_last_closed_stateid(struct nfs4_openowner *oo)
{
	struct nfsd_net *nn = net_generic(oo->oo_owner.so_client->net,
					  nfsd_net_id);
	struct nfs4_ol_stateid *s;

	spin_lock(&nn->client_lock);
	s = oo->oo_last_closed_stid;
	if (s) {
		list_del_init(&oo->oo_close_lru);
		oo->oo_last_closed_stid = NULL;
	}
	spin_unlock(&nn->client_lock);
	if (s)
		nfs4_put_stid(&s->st_stid);
}

static void release_openowner(struct nfs4_openowner *oo)
{
	struct nfs4_ol_stateid *stp;
	struct nfs4_client *clp = oo->oo_owner.so_client;
	struct list_head reaplist;

	INIT_LIST_HEAD(&reaplist);

	spin_lock(&clp->cl_lock);
	unhash_openowner_locked(oo);
	while (!list_empty(&oo->oo_owner.so_stateids)) {
		stp = list_first_entry(&oo->oo_owner.so_stateids,
				struct nfs4_ol_stateid, st_perstateowner);
		if (unhash_open_stateid(stp, &reaplist))
			put_ol_stateid_locked(stp, &reaplist);
	}
	spin_unlock(&clp->cl_lock);
	free_ol_stateid_reaplist(&reaplist);
	release_last_closed_stateid(oo);
	nfs4_put_stateowner(&oo->oo_owner);
}
static inline int
hash_sessionid(struct nfs4_sessionid *sessionid)
{
	struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid;

	return sid->sequence % SESSION_HASH_SIZE;
}

#ifdef CONFIG_SUNRPC_DEBUG
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
	u32 *ptr = (u32 *)(&sessionid->data[0]);
	dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]);
}
#else
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
}
#endif

/*
 * Bump the seqid on cstate->replay_owner, and clear replay_owner if it
 * won't be used for replay.
 */
void nfsd4_bump_seqid(struct nfsd4_compound_state *cstate, __be32 nfserr)
{
	struct nfs4_stateowner *so = cstate->replay_owner;

	if (nfserr == nfserr_replay_me)
		return;

	if (!seqid_mutating_err(ntohl(nfserr))) {
		nfsd4_cstate_clear_replay(cstate);
		return;
	}
	if (!so)
		return;
	if (so->so_is_open_owner)
		release_last_closed_stateid(openowner(so));
	so->so_seqid++;
}

static void
gen_sessionid(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd4_sessionid *sid;

	sid = (struct nfsd4_sessionid *)ses->se_sessionid.data;
	sid->clientid = clp->cl_clientid;
	sid->sequence = current_sessionid++;
	sid->reserved = 0;
}

/*
 * The protocol defines ca_maxresponssize_cached to include the size of
 * the rpc header, but all we need to cache is the data starting after
 * the end of the initial SEQUENCE operation--the rest we regenerate
 * each time.  Therefore we can advertise a ca_maxresponssize_cached
 * value that is the number of bytes in our cache plus a few additional
 * bytes.  In order to stay on the safe side, and not promise more than
 * we can cache, those additional bytes must be the minimum possible: 24
 * bytes of rpc header (xid through accept state, with AUTH_NULL
 * verifier), 12 for the compound header (with zero-length tag), and 44
 * for the SEQUENCE op response:
 */
#define NFSD_MIN_HDR_SEQ_SZ  (24 + 12 + 44)
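/* That is, 24 + 12 + 44 == 80 bytes of reply overhead on top of the cached data. */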
static void
free_session_slots(struct nfsd4_session *ses)
{
	int i;

	for (i = 0; i < ses->se_fchannel.maxreqs; i++) {
		free_svc_cred(&ses->se_slots[i]->sl_cred);
		kfree(ses->se_slots[i]);
	}
}

/*
 * We don't actually need to cache the rpc and session headers, so we
 * can allocate a little less for each slot:
 */
static inline u32 slot_bytes(struct nfsd4_channel_attrs *ca)
{
	u32 size;

	if (ca->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ)
		size = 0;
	else
		size = ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
	return size + sizeof(struct nfsd4_slot);
}

/*
 * XXX: If we run out of reserved DRC memory we could (up to a point)
 * re-negotiate active sessions and reduce their slot usage to make
 * room for new connections. For now we just fail the create session.
 */
static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca)
{
	u32 slotsize = slot_bytes(ca);
	u32 num = ca->maxreqs;
	unsigned long avail, total_avail;

	spin_lock(&nfsd_drc_lock);
	total_avail = nfsd_drc_max_mem - nfsd_drc_mem_used;
	avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION, total_avail);
	/*
	 * Never use more than a third of the remaining memory,
	 * unless it's the only way to give this client a slot:
	 */
	avail = clamp_t(unsigned long, avail, slotsize, total_avail/3);
	num = min_t(int, num, avail / slotsize);
	nfsd_drc_mem_used += num * slotsize;
	spin_unlock(&nfsd_drc_lock);

	return num;
}

static void nfsd4_put_drc_mem(struct nfsd4_channel_attrs *ca)
{
	int slotsize = slot_bytes(ca);

	spin_lock(&nfsd_drc_lock);
	nfsd_drc_mem_used -= slotsize * ca->maxreqs;
	spin_unlock(&nfsd_drc_lock);
}
static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs,
					   struct nfsd4_channel_attrs *battrs)
{
	int numslots = fattrs->maxreqs;
	int slotsize = slot_bytes(fattrs);
	struct nfsd4_session *new;
	int mem, i;

	BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot *)
			+ sizeof(struct nfsd4_session) > PAGE_SIZE);
	mem = numslots * sizeof(struct nfsd4_slot *);

	new = kzalloc(sizeof(*new) + mem, GFP_KERNEL);
	if (!new)
		return NULL;
	/* allocate each struct nfsd4_slot and data cache in one piece */
	for (i = 0; i < numslots; i++) {
		new->se_slots[i] = kzalloc(slotsize, GFP_KERNEL);
		if (!new->se_slots[i])
			goto out_free;
	}

	memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs));
	memcpy(&new->se_bchannel, battrs, sizeof(struct nfsd4_channel_attrs));

	return new;
out_free:
	while (i--)
		kfree(new->se_slots[i]);
	kfree(new);
	return NULL;
}

static void free_conn(struct nfsd4_conn *c)
{
	svc_xprt_put(c->cn_xprt);
	kfree(c);
}
static void nfsd4_conn_lost(struct svc_xpt_user *u)
{
	struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user);
	struct nfs4_client *clp = c->cn_session->se_client;

	spin_lock(&clp->cl_lock);
	if (!list_empty(&c->cn_persession)) {
		list_del(&c->cn_persession);
		free_conn(c);
	}
	nfsd4_probe_callback(clp);
	spin_unlock(&clp->cl_lock);
}

static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
{
	struct nfsd4_conn *conn;

	conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL);
	if (!conn)
		return NULL;
	svc_xprt_get(rqstp->rq_xprt);
	conn->cn_xprt = rqstp->rq_xprt;
	conn->cn_flags = flags;
	INIT_LIST_HEAD(&conn->cn_xpt_user.list);
	return conn;
}

static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	conn->cn_session = ses;
	list_add(&conn->cn_persession, &ses->se_conns);
}

static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;

	spin_lock(&clp->cl_lock);
	__nfsd4_hash_conn(conn, ses);
	spin_unlock(&clp->cl_lock);
}

static int nfsd4_register_conn(struct nfsd4_conn *conn)
{
	conn->cn_xpt_user.callback = nfsd4_conn_lost;
	return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
}

static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	int ret;

	nfsd4_hash_conn(conn, ses);
	ret = nfsd4_register_conn(conn);
	if (ret)
		/* oops; xprt is already down: */
		nfsd4_conn_lost(&conn->cn_xpt_user);
	/* We may have gained or lost a callback channel: */
	nfsd4_probe_callback_sync(ses->se_client);
}
static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses)
{
	u32 dir = NFS4_CDFC4_FORE;

	if (cses->flags & SESSION4_BACK_CHAN)
		dir |= NFS4_CDFC4_BACK;
	return alloc_conn(rqstp, dir);
}

/* must be called under client_lock */
static void nfsd4_del_conns(struct nfsd4_session *s)
{
	struct nfs4_client *clp = s->se_client;
	struct nfsd4_conn *c;

	spin_lock(&clp->cl_lock);
	while (!list_empty(&s->se_conns)) {
		c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession);
		list_del_init(&c->cn_persession);
		spin_unlock(&clp->cl_lock);

		unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user);
		free_conn(c);

		spin_lock(&clp->cl_lock);
	}
	spin_unlock(&clp->cl_lock);
}

static void __free_session(struct nfsd4_session *ses)
{
	free_session_slots(ses);
	kfree(ses);
}

static void free_session(struct nfsd4_session *ses)
{
	nfsd4_del_conns(ses);
	nfsd4_put_drc_mem(&ses->se_fchannel);
	__free_session(ses);
}
static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses)
{
	int idx;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	new->se_client = clp;
	gen_sessionid(new);

	INIT_LIST_HEAD(&new->se_conns);

	new->se_cb_seq_nr = 1;
	new->se_flags = cses->flags;
	new->se_cb_prog = cses->callback_prog;
	new->se_cb_sec = cses->cb_sec;
	atomic_set(&new->se_ref, 0);
	idx = hash_sessionid(&new->se_sessionid);
	list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]);
	spin_lock(&clp->cl_lock);
	list_add(&new->se_perclnt, &clp->cl_sessions);
	spin_unlock(&clp->cl_lock);

	{
		struct sockaddr *sa = svc_addr(rqstp);
		/*
		 * This is a little silly; with sessions there's no real
		 * use for the callback address.  Use the peer address
		 * as a reasonable default for now, but consider fixing
		 * the rpc client not to require an address in the
		 * future:
		 */
		rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
		clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
	}
}
/* caller must hold client_lock */
static struct nfsd4_session *
__find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net)
{
	struct nfsd4_session *elem;
	int idx;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	dump_sessionid(__func__, sessionid);
	idx = hash_sessionid(sessionid);
	/* Search in the appropriate list */
	list_for_each_entry(elem, &nn->sessionid_hashtbl[idx], se_hash) {
		if (!memcmp(elem->se_sessionid.data, sessionid->data,
			    NFS4_MAX_SESSIONID_LEN)) {
			return elem;
		}
	}

	dprintk("%s: session not found\n", __func__);
	return NULL;
}

static struct nfsd4_session *
find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net,
		__be32 *ret)
{
	struct nfsd4_session *session;
	__be32 status = nfserr_badsession;

	session = __find_in_sessionid_hashtbl(sessionid, net);
	if (!session)
		goto out;
	status = nfsd4_get_session_locked(session);
	if (status)
		session = NULL;
out:
	*ret = status;
	return session;
}

/* caller must hold client_lock */
static void
unhash_session(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	list_del(&ses->se_hash);
	spin_lock(&ses->se_client->cl_lock);
	list_del(&ses->se_perclnt);
	spin_unlock(&ses->se_client->cl_lock);
}

/* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
static bool
STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn)
{
	/*
	 * We're assuming the clid was not given out from a boot
	 * precisely 2^32 (about 136 years) before this one.  That seems
	 * a safe assumption:
	 */
	if (clid->cl_boot == (u32)nn->boot_time)
		return false;
	dprintk("NFSD stale clientid (%08x/%08x) boot_time %08lx\n",
		clid->cl_boot, clid->cl_id, nn->boot_time);
	return true;
}
/*
 * XXX Should we use a slab cache ?
 * This type of memory management is somewhat inefficient, but we use it
 * anyway since SETCLIENTID is not a common operation.
 */
static struct nfs4_client *alloc_client(struct xdr_netobj name)
{
	struct nfs4_client *clp;
	int i;

	clp = kmem_cache_zalloc(client_slab, GFP_KERNEL);
	if (clp == NULL)
		return NULL;
	clp->cl_name.data = kmemdup(name.data, name.len, GFP_KERNEL);
	if (clp->cl_name.data == NULL)
		goto err_no_name;
	clp->cl_ownerstr_hashtbl = kmalloc_array(OWNER_HASH_SIZE,
						 sizeof(struct list_head),
						 GFP_KERNEL);
	if (!clp->cl_ownerstr_hashtbl)
		goto err_no_hashtbl;
	for (i = 0; i < OWNER_HASH_SIZE; i++)
		INIT_LIST_HEAD(&clp->cl_ownerstr_hashtbl[i]);
	clp->cl_name.len = name.len;
	INIT_LIST_HEAD(&clp->cl_sessions);
	idr_init(&clp->cl_stateids);
	atomic_set(&clp->cl_refcount, 0);
	clp->cl_cb_state = NFSD4_CB_UNKNOWN;
	INIT_LIST_HEAD(&clp->cl_idhash);
	INIT_LIST_HEAD(&clp->cl_openowners);
	INIT_LIST_HEAD(&clp->cl_delegations);
	INIT_LIST_HEAD(&clp->cl_lru);
	INIT_LIST_HEAD(&clp->cl_revoked);
#ifdef CONFIG_NFSD_PNFS
	INIT_LIST_HEAD(&clp->cl_lo_states);
#endif
	INIT_LIST_HEAD(&clp->async_copies);
	spin_lock_init(&clp->async_lock);
	spin_lock_init(&clp->cl_lock);
	rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
	return clp;
err_no_hashtbl:
	kfree(clp->cl_name.data);
err_no_name:
	kmem_cache_free(client_slab, clp);
	return NULL;
}

static void
free_client(struct nfs4_client *clp)
{
	while (!list_empty(&clp->cl_sessions)) {
		struct nfsd4_session *ses;
		ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
				se_perclnt);
		list_del(&ses->se_perclnt);
		WARN_ON_ONCE(atomic_read(&ses->se_ref));
		free_session(ses);
	}
	rpc_destroy_wait_queue(&clp->cl_cb_waitq);
	free_svc_cred(&clp->cl_cred);
	kfree(clp->cl_ownerstr_hashtbl);
	kfree(clp->cl_name.data);
	idr_destroy(&clp->cl_stateids);
	kmem_cache_free(client_slab, clp);
}
/* must be called under the client_lock */
static void
unhash_client_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
	struct nfsd4_session *ses;

	lockdep_assert_held(&nn->client_lock);

	/* Mark the client as expired! */
	clp->cl_time = 0;
	/* Make it invisible */
	if (!list_empty(&clp->cl_idhash)) {
		list_del_init(&clp->cl_idhash);
		if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
			rb_erase(&clp->cl_namenode, &nn->conf_name_tree);
		else
			rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
	}
	list_del_init(&clp->cl_lru);
	spin_lock(&clp->cl_lock);
	list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
		list_del_init(&ses->se_hash);
	spin_unlock(&clp->cl_lock);
}

static void
unhash_client(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	spin_lock(&nn->client_lock);
	unhash_client_locked(clp);
	spin_unlock(&nn->client_lock);
}

static __be32 mark_client_expired_locked(struct nfs4_client *clp)
{
	if (atomic_read(&clp->cl_refcount))
		return nfserr_jukebox;
	unhash_client_locked(clp);
	return nfs_ok;
}
static void
__destroy_client(struct nfs4_client *clp)
{
	int i;
	struct nfs4_openowner *oo;
	struct nfs4_delegation *dp;
	struct list_head reaplist;

	INIT_LIST_HEAD(&reaplist);
	spin_lock(&state_lock);
	while (!list_empty(&clp->cl_delegations)) {
		dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
		WARN_ON(!unhash_delegation_locked(dp));
		list_add(&dp->dl_recall_lru, &reaplist);
	}
	spin_unlock(&state_lock);
	while (!list_empty(&reaplist)) {
		dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
		list_del_init(&dp->dl_recall_lru);
		destroy_unhashed_deleg(dp);
	}
	while (!list_empty(&clp->cl_revoked)) {
		dp = list_entry(clp->cl_revoked.next, struct nfs4_delegation, dl_recall_lru);
		list_del_init(&dp->dl_recall_lru);
		nfs4_put_stid(&dp->dl_stid);
	}
	while (!list_empty(&clp->cl_openowners)) {
		oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
		nfs4_get_stateowner(&oo->oo_owner);
		release_openowner(oo);
	}
	for (i = 0; i < OWNER_HASH_SIZE; i++) {
		struct nfs4_stateowner *so, *tmp;

		list_for_each_entry_safe(so, tmp, &clp->cl_ownerstr_hashtbl[i],
					 so_strhash) {
			/* Should be no openowners at this point */
			WARN_ON_ONCE(so->so_is_open_owner);
			remove_blocked_locks(lockowner(so));
		}
	}
	nfsd4_return_all_client_layouts(clp);
	nfsd4_shutdown_copy(clp);
	nfsd4_shutdown_callback(clp);
	if (clp->cl_cb_conn.cb_xprt)
		svc_xprt_put(clp->cl_cb_conn.cb_xprt);
	free_client(clp);
}

static void
destroy_client(struct nfs4_client *clp)
{
	unhash_client(clp);
	__destroy_client(clp);
}

static void inc_reclaim_complete(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	if (!nn->track_reclaim_completes)
		return;
	if (!nfsd4_find_reclaim_client(clp->cl_name, nn))
		return;
	if (atomic_inc_return(&nn->nr_reclaim_complete) ==
	    nn->reclaim_str_hashtbl_size) {
		printk(KERN_INFO "NFSD: all clients done reclaiming, ending NFSv4 grace period (net %x)\n",
				clp->net->ns.inum);
		nfsd4_end_grace(nn);
	}
}

static void expire_client(struct nfs4_client *clp)
{
	unhash_client(clp);
	nfsd4_client_record_remove(clp);
	__destroy_client(clp);
}
static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
{
	memcpy(target->cl_verifier.data, source->data,
			sizeof(target->cl_verifier.data));
}

static void copy_clid(struct nfs4_client *target, struct nfs4_client *source)
{
	target->cl_clientid.cl_boot = source->cl_clientid.cl_boot;
	target->cl_clientid.cl_id = source->cl_clientid.cl_id;
}

static int copy_cred(struct svc_cred *target, struct svc_cred *source)
{
	target->cr_principal = kstrdup(source->cr_principal, GFP_KERNEL);
	target->cr_raw_principal = kstrdup(source->cr_raw_principal,
								GFP_KERNEL);
	target->cr_targ_princ = kstrdup(source->cr_targ_princ, GFP_KERNEL);
	if ((source->cr_principal && !target->cr_principal) ||
	    (source->cr_raw_principal && !target->cr_raw_principal) ||
	    (source->cr_targ_princ && !target->cr_targ_princ))
		return -ENOMEM;

	target->cr_flavor = source->cr_flavor;
	target->cr_uid = source->cr_uid;
	target->cr_gid = source->cr_gid;
	target->cr_group_info = source->cr_group_info;
	get_group_info(target->cr_group_info);
	target->cr_gss_mech = source->cr_gss_mech;
	if (source->cr_gss_mech)
		gss_mech_get(source->cr_gss_mech);
	return 0;
}
static int
compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2)
{
	if (o1->len < o2->len)
		return -1;
	if (o1->len > o2->len)
		return 1;
	return memcmp(o1->data, o2->data, o1->len);
}

static bool
same_verf(nfs4_verifier *v1, nfs4_verifier *v2)
{
	return 0 == memcmp(v1->data, v2->data, sizeof(v1->data));
}

static bool
same_clid(clientid_t *cl1, clientid_t *cl2)
{
	return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id);
}

static bool groups_equal(struct group_info *g1, struct group_info *g2)
{
	int i;

	if (g1->ngroups != g2->ngroups)
		return false;
	for (i = 0; i < g1->ngroups; i++)
		if (!gid_eq(g1->gid[i], g2->gid[i]))
			return false;
	return true;
}

/*
 * RFC 3530 language requires clid_inuse be returned when the
 * "principal" associated with a request differs from that previously
 * used.  We use uid, gid's, and gss principal string as our best
 * approximation.  We also don't want to allow non-gss use of a client
 * established using gss: in theory cr_principal should catch that
 * change, but in practice cr_principal can be null even in the gss case
 * since gssd doesn't always pass down a principal string.
 */
static bool is_gss_cred(struct svc_cred *cr)
{
	/* Is cr_flavor one of the gss "pseudoflavors"?: */
	return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR);
}

static bool
same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
{
	if ((is_gss_cred(cr1) != is_gss_cred(cr2))
		|| (!uid_eq(cr1->cr_uid, cr2->cr_uid))
		|| (!gid_eq(cr1->cr_gid, cr2->cr_gid))
		|| !groups_equal(cr1->cr_group_info, cr2->cr_group_info))
		return false;
	/* XXX: check that cr_targ_princ fields match ? */
	if (cr1->cr_principal == cr2->cr_principal)
		return true;
	if (!cr1->cr_principal || !cr2->cr_principal)
		return false;
	return 0 == strcmp(cr1->cr_principal, cr2->cr_principal);
}
static bool svc_rqst_integrity_protected(struct svc_rqst *rqstp)
{
	struct svc_cred *cr = &rqstp->rq_cred;
	u32 service;

	if (!cr->cr_gss_mech)
		return false;
	service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor);
	return service == RPC_GSS_SVC_INTEGRITY ||
	       service == RPC_GSS_SVC_PRIVACY;
}

bool nfsd4_mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp)
{
	struct svc_cred *cr = &rqstp->rq_cred;

	if (!cl->cl_mach_cred)
		return true;
	if (cl->cl_cred.cr_gss_mech != cr->cr_gss_mech)
		return false;
	if (!svc_rqst_integrity_protected(rqstp))
		return false;
	if (cl->cl_cred.cr_raw_principal)
		return 0 == strcmp(cl->cl_cred.cr_raw_principal,
						cr->cr_raw_principal);
	if (!cr->cr_principal)
		return false;
	return 0 == strcmp(cl->cl_cred.cr_principal, cr->cr_principal);
}

static void gen_confirm(struct nfs4_client *clp, struct nfsd_net *nn)
{
	__be32 verf[2];

	/*
	 * This is opaque to client, so no need to byte-swap. Use
	 * __force to keep sparse happy
	 */
	verf[0] = (__force __be32)get_seconds();
	verf[1] = (__force __be32)nn->clverifier_counter++;
	memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data));
}

static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn)
{
	clp->cl_clientid.cl_boot = nn->boot_time;
	clp->cl_clientid.cl_id = nn->clientid_counter++;
	gen_confirm(clp, nn);
}
static struct nfs4_stid *
find_stateid_locked(struct nfs4_client *cl, stateid_t *t)
{
	struct nfs4_stid *ret;

	ret = idr_find(&cl->cl_stateids, t->si_opaque.so_id);
	if (!ret || !ret->sc_type)
		return NULL;
	return ret;
}

static struct nfs4_stid *
find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask)
{
	struct nfs4_stid *s;

	spin_lock(&cl->cl_lock);
	s = find_stateid_locked(cl, t);
	if (s != NULL) {
		if (typemask & s->sc_type)
			refcount_inc(&s->sc_count);
		else
			s = NULL;
	}
	spin_unlock(&cl->cl_lock);
	return s;
}

static struct nfs4_client *create_client(struct xdr_netobj name,
		struct svc_rqst *rqstp, nfs4_verifier *verf)
{
	struct nfs4_client *clp;
	struct sockaddr *sa = svc_addr(rqstp);
	int ret;
	struct net *net = SVC_NET(rqstp);

	clp = alloc_client(name);
	if (clp == NULL)
		return NULL;

	ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
	if (ret) {
		free_client(clp);
		return NULL;
	}
	nfsd4_init_cb(&clp->cl_cb_null, clp, NULL, NFSPROC4_CLNT_CB_NULL);
	clp->cl_time = get_seconds();
	clear_bit(0, &clp->cl_cb_slot_busy);
	copy_verf(clp, verf);
	rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa);
	clp->cl_cb_session = NULL;
	clp->net = net;
	return clp;
}
static void
add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	struct nfs4_client *clp;

	while (*new) {
		clp = rb_entry(*new, struct nfs4_client, cl_namenode);
		parent = *new;

		if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	rb_link_node(&new_clp->cl_namenode, parent, new);
	rb_insert_color(&new_clp->cl_namenode, root);
}
static struct nfs4_client *
find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root)
{
	int cmp;
	struct rb_node *node = root->rb_node;
	struct nfs4_client *clp;

	while (node) {
		clp = rb_entry(node, struct nfs4_client, cl_namenode);
		cmp = compare_blob(&clp->cl_name, name);
		if (cmp > 0)
			node = node->rb_left;
		else if (cmp < 0)
			node = node->rb_right;
		else
			return clp;
	}
	return NULL;
}
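/*
 * Hash a newly created client into the per-net unconfirmed tables
 * (both the id hash and the name tree); caller holds nn->client_lock.
 */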
static void
add_to_unconfirmed(struct nfs4_client *clp)
{
	unsigned int idhashval;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
	add_clp_to_name_tree(clp, &nn->unconf_name_tree);
	idhashval = clientid_hashval(clp->cl_clientid.cl_id);
	list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]);
	renew_client_locked(clp);
}
static void
move_to_confirmed(struct nfs4_client *clp)
{
	unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id);
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	dprintk("NFSD: move_to_confirm nfs4_client %p\n", clp);
	list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]);
	rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
	add_clp_to_name_tree(clp, &nn->conf_name_tree);
	set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
	renew_client_locked(clp);
}
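/*
 * Look up a client by shorthand clientid.  The "sessions" flag selects
 * between session-based (NFSv4.1+) and NFSv4.0 clients, so a clientid
 * is never accepted by the wrong protocol variant.
 */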
static struct nfs4_client *
find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions)
{
	struct nfs4_client *clp;
	unsigned int idhashval = clientid_hashval(clid->cl_id);

	list_for_each_entry(clp, &tbl[idhashval], cl_idhash) {
		if (same_clid(&clp->cl_clientid, clid)) {
			if ((bool)clp->cl_minorversion != sessions)
				return NULL;
			renew_client_locked(clp);
			return clp;
		}
	}
	return NULL;
}
static struct nfs4_client *
find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
{
	struct list_head *tbl = nn->conf_id_hashtbl;

	lockdep_assert_held(&nn->client_lock);
	return find_client_in_id_table(tbl, clid, sessions);
}

static struct nfs4_client *
find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
{
	struct list_head *tbl = nn->unconf_id_hashtbl;

	lockdep_assert_held(&nn->client_lock);
	return find_client_in_id_table(tbl, clid, sessions);
}

static bool clp_used_exchangeid(struct nfs4_client *clp)
{
	return clp->cl_exchange_flags != 0;
}

static struct nfs4_client *
find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
{
	lockdep_assert_held(&nn->client_lock);
	return find_clp_in_name_tree(name, &nn->conf_name_tree);
}

static struct nfs4_client *
find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
{
	lockdep_assert_held(&nn->client_lock);
	return find_clp_in_name_tree(name, &nn->unconf_name_tree);
}
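/*
 * Record the client's SETCLIENTID callback address.  On any parse or
 * transport mismatch the callback address is simply cleared, and the
 * client will not be offered delegations.
 */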
static void
gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp)
{
	struct nfs4_cb_conn *conn = &clp->cl_cb_conn;
	struct sockaddr	*sa = svc_addr(rqstp);
	u32 scopeid = rpc_get_scope_id(sa);
	unsigned short expected_family;

	/* Currently, we only support tcp and tcp6 for the callback channel */
	if (se->se_callback_netid_len == 3 &&
	    !memcmp(se->se_callback_netid_val, "tcp", 3))
		expected_family = AF_INET;
	else if (se->se_callback_netid_len == 4 &&
		 !memcmp(se->se_callback_netid_val, "tcp6", 4))
		expected_family = AF_INET6;
	else
		goto out_err;

	conn->cb_addrlen = rpc_uaddr2sockaddr(clp->net, se->se_callback_addr_val,
					    se->se_callback_addr_len,
					    (struct sockaddr *)&conn->cb_addr,
					    sizeof(conn->cb_addr));

	if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family)
		goto out_err;

	if (conn->cb_addr.ss_family == AF_INET6)
		((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid;

	conn->cb_prog = se->se_callback_prog;
	conn->cb_ident = se->se_callback_ident;
	memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen);
	return;
out_err:
	conn->cb_addr.ss_family = AF_UNSPEC;
	conn->cb_addrlen = 0;
	dprintk("NFSD: this client (clientid %08x/%08x) "
		"will not receive delegations\n",
		clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id);

	return;
}
/*
 * Cache a reply. nfsd4_check_resp_size() has bounded the cache size.
 */
static void
nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
{
	struct xdr_buf *buf = resp->xdr.buf;
	struct nfsd4_slot *slot = resp->cstate.slot;
	unsigned int base;

	dprintk("--> %s slot %p\n", __func__, slot);

	slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
	slot->sl_opcnt = resp->opcnt;
	slot->sl_status = resp->cstate.status;
	free_svc_cred(&slot->sl_cred);
	copy_cred(&slot->sl_cred, &resp->rqstp->rq_cred);

	if (!nfsd4_cache_this(resp)) {
		slot->sl_flags &= ~NFSD4_SLOT_CACHED;
		return;
	}
	slot->sl_flags |= NFSD4_SLOT_CACHED;

	base = resp->cstate.data_offset;
	slot->sl_datalen = buf->len - base;
	if (read_bytes_from_xdr_buf(buf, base, slot->sl_data, slot->sl_datalen))
		WARN(1, "%s: sessions DRC could not cache compound\n",
		     __func__);
	return;
}
/*
 * Encode the replay sequence operation from the slot values.
 * If cachethis is FALSE encode the uncached rep error on the next
 * operation which sets resp->p and increments resp->opcnt for
 * nfs4svc_encode_compoundres.
 */
static __be32
nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
			  struct nfsd4_compoundres *resp)
{
	struct nfsd4_op *op;
	struct nfsd4_slot *slot = resp->cstate.slot;

	/* Encode the replayed sequence operation */
	op = &args->ops[resp->opcnt - 1];
	nfsd4_encode_operation(resp, op);

	if (slot->sl_flags & NFSD4_SLOT_CACHED)
		return op->status;
	if (args->opcnt == 1) {
		/*
		 * The original operation wasn't a solo sequence--we
		 * always cache those--so this retry must not match the
		 * original:
		 */
		op->status = nfserr_seq_false_retry;
	} else {
		op = &args->ops[resp->opcnt++];
		op->status = nfserr_retry_uncached_rep;
		nfsd4_encode_operation(resp, op);
	}
	return op->status;
}
/*
 * The sequence operation is not cached because we can use the slot and
 * session values.
 */
static __be32
nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
			 struct nfsd4_sequence *seq)
{
	struct nfsd4_slot *slot = resp->cstate.slot;
	struct xdr_stream *xdr = &resp->xdr;
	__be32 *p;
	__be32 status;

	dprintk("--> %s slot %p\n", __func__, slot);

	status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp);
	if (status)
		return status;

	p = xdr_reserve_space(xdr, slot->sl_datalen);
	if (!p) {
		WARN_ON_ONCE(1);
		return nfserr_serverfault;
	}
	xdr_encode_opaque_fixed(p, slot->sl_data, slot->sl_datalen);
	xdr_commit_encode(xdr);

	resp->opcnt = slot->sl_opcnt;
	return slot->sl_status;
}
/*
 * Set the exchange_id flags returned by the server.
 */
static void
nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid)
{
#ifdef CONFIG_NFSD_PNFS
	new->cl_exchange_flags |= EXCHGID4_FLAG_USE_PNFS_MDS;
#else
	new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS;
#endif

	/* Referrals are supported, Migration is not. */
	new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER;

	/* set the wire flags to return to client. */
	clid->flags = new->cl_exchange_flags;
}
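/*
 * Does the client currently hold any state (opens, delegations,
 * layouts, sessions or async copies)?  Used when deciding whether a
 * new EXCHANGE_ID/SETCLIENTID instance may displace an existing client.
 */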
static bool client_has_openowners(struct nfs4_client *clp)
{
	struct nfs4_openowner *oo;

	list_for_each_entry(oo, &clp->cl_openowners, oo_perclient) {
		if (!list_empty(&oo->oo_owner.so_stateids))
			return true;
	}
	return false;
}

static bool client_has_state(struct nfs4_client *clp)
{
	return client_has_openowners(clp)
#ifdef CONFIG_NFSD_PNFS
		|| !list_empty(&clp->cl_lo_states)
#endif
		|| !list_empty(&clp->cl_delegations)
		|| !list_empty(&clp->cl_sessions)
		|| !list_empty(&clp->async_copies);
}
2537 nfsd4_exchange_id(struct svc_rqst
*rqstp
, struct nfsd4_compound_state
*cstate
,
2538 union nfsd4_op_u
*u
)
2540 struct nfsd4_exchange_id
*exid
= &u
->exchange_id
;
2541 struct nfs4_client
*conf
, *new;
2542 struct nfs4_client
*unconf
= NULL
;
2544 char addr_str
[INET6_ADDRSTRLEN
];
2545 nfs4_verifier verf
= exid
->verifier
;
2546 struct sockaddr
*sa
= svc_addr(rqstp
);
2547 bool update
= exid
->flags
& EXCHGID4_FLAG_UPD_CONFIRMED_REC_A
;
2548 struct nfsd_net
*nn
= net_generic(SVC_NET(rqstp
), nfsd_net_id
);
2550 rpc_ntop(sa
, addr_str
, sizeof(addr_str
));
2551 dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p "
2552 "ip_addr=%s flags %x, spa_how %d\n",
2553 __func__
, rqstp
, exid
, exid
->clname
.len
, exid
->clname
.data
,
2554 addr_str
, exid
->flags
, exid
->spa_how
);
2556 if (exid
->flags
& ~EXCHGID4_FLAG_MASK_A
)
2557 return nfserr_inval
;
2559 new = create_client(exid
->clname
, rqstp
, &verf
);
2561 return nfserr_jukebox
;
2563 switch (exid
->spa_how
) {
2565 exid
->spo_must_enforce
[0] = 0;
2566 exid
->spo_must_enforce
[1] = (
2567 1 << (OP_BIND_CONN_TO_SESSION
- 32) |
2568 1 << (OP_EXCHANGE_ID
- 32) |
2569 1 << (OP_CREATE_SESSION
- 32) |
2570 1 << (OP_DESTROY_SESSION
- 32) |
2571 1 << (OP_DESTROY_CLIENTID
- 32));
2573 exid
->spo_must_allow
[0] &= (1 << (OP_CLOSE
) |
2574 1 << (OP_OPEN_DOWNGRADE
) |
2576 1 << (OP_DELEGRETURN
));
2578 exid
->spo_must_allow
[1] &= (
2579 1 << (OP_TEST_STATEID
- 32) |
2580 1 << (OP_FREE_STATEID
- 32));
2581 if (!svc_rqst_integrity_protected(rqstp
)) {
2582 status
= nfserr_inval
;
2586 * Sometimes userspace doesn't give us a principal.
2587 * Which is a bug, really. Anyway, we can't enforce
2588 * MACH_CRED in that case, better to give up now:
2590 if (!new->cl_cred
.cr_principal
&&
2591 !new->cl_cred
.cr_raw_principal
) {
2592 status
= nfserr_serverfault
;
2595 new->cl_mach_cred
= true;
2598 default: /* checked by xdr code */
2602 status
= nfserr_encr_alg_unsupp
;
2606 /* Cases below refer to rfc 5661 section 18.35.4: */
2607 spin_lock(&nn
->client_lock
);
2608 conf
= find_confirmed_client_by_name(&exid
->clname
, nn
);
2610 bool creds_match
= same_creds(&conf
->cl_cred
, &rqstp
->rq_cred
);
2611 bool verfs_match
= same_verf(&verf
, &conf
->cl_verifier
);
2614 if (!clp_used_exchangeid(conf
)) { /* buggy client */
2615 status
= nfserr_inval
;
2618 if (!nfsd4_mach_creds_match(conf
, rqstp
)) {
2619 status
= nfserr_wrong_cred
;
2622 if (!creds_match
) { /* case 9 */
2623 status
= nfserr_perm
;
2626 if (!verfs_match
) { /* case 8 */
2627 status
= nfserr_not_same
;
2631 exid
->flags
|= EXCHGID4_FLAG_CONFIRMED_R
;
2634 if (!creds_match
) { /* case 3 */
2635 if (client_has_state(conf
)) {
2636 status
= nfserr_clid_inuse
;
2641 if (verfs_match
) { /* case 2 */
2642 conf
->cl_exchange_flags
|= EXCHGID4_FLAG_CONFIRMED_R
;
2645 /* case 5, client reboot */
2650 if (update
) { /* case 7 */
2651 status
= nfserr_noent
;
2655 unconf
= find_unconfirmed_client_by_name(&exid
->clname
, nn
);
2656 if (unconf
) /* case 4, possible retry or client restart */
2657 unhash_client_locked(unconf
);
2659 /* case 1 (normal case) */
2662 status
= mark_client_expired_locked(conf
);
2666 new->cl_minorversion
= cstate
->minorversion
;
2667 new->cl_spo_must_allow
.u
.words
[0] = exid
->spo_must_allow
[0];
2668 new->cl_spo_must_allow
.u
.words
[1] = exid
->spo_must_allow
[1];
2671 add_to_unconfirmed(new);
2674 exid
->clientid
.cl_boot
= conf
->cl_clientid
.cl_boot
;
2675 exid
->clientid
.cl_id
= conf
->cl_clientid
.cl_id
;
2677 exid
->seqid
= conf
->cl_cs_slot
.sl_seqid
+ 1;
2678 nfsd4_set_ex_flags(conf
, exid
);
2680 dprintk("nfsd4_exchange_id seqid %d flags %x\n",
2681 conf
->cl_cs_slot
.sl_seqid
, conf
->cl_exchange_flags
);
2685 spin_unlock(&nn
->client_lock
);
2690 expire_client(unconf
);
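/*
 * Validate a SEQUENCE/CREATE_SESSION seqid against the slot's last
 * seen value: slot_seqid + 1 is the next new request, a seqid equal
 * to slot_seqid is a retransmission answered from the reply cache,
 * anything else is misordered.  Unsigned 32-bit arithmetic keeps the
 * +1 comparison correct across wraparound (0xffffffff -> 0).
 */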
static __be32
check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
{
	dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,
		slot_seqid);

	/* The slot is in use, and no response has been sent. */
	if (slot_inuse) {
		if (seqid == slot_seqid)
			return nfserr_jukebox;
		else
			return nfserr_seq_misordered;
	}
	/* Note unsigned 32-bit arithmetic handles wraparound: */
	if (likely(seqid == slot_seqid + 1))
		return nfs_ok;
	if (seqid == slot_seqid)
		return nfserr_replay_cache;
	return nfserr_seq_misordered;
}
/*
 * Cache the create session result into the create session single DRC
 * slot cache by saving the xdr structure. sl_seqid has been set.
 * Do this for solo or embedded create session operations.
 */
static void
nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
			   struct nfsd4_clid_slot *slot, __be32 nfserr)
{
	slot->sl_status = nfserr;
	memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses));
}

static __be32
nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
			    struct nfsd4_clid_slot *slot)
{
	memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses));
	return slot->sl_status;
}
#define NFSD_MIN_REQ_HDR_SEQ_SZ	((\
			2 * 2 + /* credential,verifier: AUTH_NULL, length 0 */ \
			1 +	/* MIN tag is length with zero, only length */ \
			3 +	/* version, opcount, opcode */ \
			XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
				/* seqid, slotID, slotID, cache */ \
			4 ) * sizeof(__be32))

#define NFSD_MIN_RESP_HDR_SEQ_SZ ((\
			2 +	/* verifier: AUTH_NULL, length 0 */\
			1 +	/* status */ \
			1 +	/* MIN tag is length with zero, only length */ \
			3 +	/* opcount, opcode, opstatus*/ \
			XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
				/* seqid, slotID, slotID, slotID, status */ \
			5 ) * sizeof(__be32))
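/*
 * Clamp the client's requested fore-channel attributes to what this
 * server supports; only reject outright when the request or reply size
 * is too small to carry even a minimal SEQUENCE compound.
 */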
static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
{
	u32 maxrpc = nn->nfsd_serv->sv_max_mesg;

	if (ca->maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ)
		return nfserr_toosmall;
	if (ca->maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ)
		return nfserr_toosmall;
	ca->headerpadsz = 0;
	ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc);
	ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc);
	ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND);
	ca->maxresp_cached = min_t(u32, ca->maxresp_cached,
			NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ);
	ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION);
	/*
	 * Note decreasing slot size below client's request may make it
	 * difficult for client to function correctly, whereas
	 * decreasing the number of slots will (just?) affect
	 * performance.  When short on memory we therefore prefer to
	 * decrease number of slots instead of their size.  Clients that
	 * request larger slots than they need will get poor results:
	 */
	ca->maxreqs = nfsd4_get_drc_mem(ca);
	if (!ca->maxreqs)
		return nfserr_jukebox;

	return nfs_ok;
}
/*
 * Server's NFSv4.1 backchannel support is AUTH_SYS-only for now.
 * These are based on similar macros in linux/sunrpc/msg_prot.h .
 */
#define RPC_MAX_HEADER_WITH_AUTH_SYS \
	(RPC_CALLHDRSIZE + 2 * (2 + UNX_CALLSLACK))

#define RPC_MAX_REPHEADER_WITH_AUTH_SYS \
	(RPC_REPHDRSIZE + (2 + NUL_REPLYSLACK))

#define NFSD_CB_MAX_REQ_SZ	((NFS4_enc_cb_recall_sz + \
				 RPC_MAX_HEADER_WITH_AUTH_SYS) * sizeof(__be32))
#define NFSD_CB_MAX_RESP_SZ	((NFS4_dec_cb_recall_sz + \
				 RPC_MAX_REPHEADER_WITH_AUTH_SYS) * \
				 sizeof(__be32))

static __be32 check_backchannel_attrs(struct nfsd4_channel_attrs *ca)
{
	ca->headerpadsz = 0;

	if (ca->maxreq_sz < NFSD_CB_MAX_REQ_SZ)
		return nfserr_toosmall;
	if (ca->maxresp_sz < NFSD_CB_MAX_RESP_SZ)
		return nfserr_toosmall;
	ca->maxresp_cached = 0;
	if (ca->maxops < 2)
		return nfserr_toosmall;

	return nfs_ok;
}
static __be32 nfsd4_check_cb_sec(struct nfsd4_cb_sec *cbs)
{
	switch (cbs->flavor) {
	case RPC_AUTH_NULL:
	case RPC_AUTH_UNIX:
		return nfs_ok;
	default:
		/*
		 * GSS case: the spec doesn't allow us to return this
		 * error.  But it also doesn't allow us not to support
		 * GSS.
		 * I'd rather this fail hard than return some error the
		 * client might think it can already handle:
		 */
		return nfserr_encr_alg_unsupp;
	}
}
2833 nfsd4_create_session(struct svc_rqst
*rqstp
,
2834 struct nfsd4_compound_state
*cstate
, union nfsd4_op_u
*u
)
2836 struct nfsd4_create_session
*cr_ses
= &u
->create_session
;
2837 struct sockaddr
*sa
= svc_addr(rqstp
);
2838 struct nfs4_client
*conf
, *unconf
;
2839 struct nfs4_client
*old
= NULL
;
2840 struct nfsd4_session
*new;
2841 struct nfsd4_conn
*conn
;
2842 struct nfsd4_clid_slot
*cs_slot
= NULL
;
2844 struct nfsd_net
*nn
= net_generic(SVC_NET(rqstp
), nfsd_net_id
);
2846 if (cr_ses
->flags
& ~SESSION4_FLAG_MASK_A
)
2847 return nfserr_inval
;
2848 status
= nfsd4_check_cb_sec(&cr_ses
->cb_sec
);
2851 status
= check_forechannel_attrs(&cr_ses
->fore_channel
, nn
);
2854 status
= check_backchannel_attrs(&cr_ses
->back_channel
);
2856 goto out_release_drc_mem
;
2857 status
= nfserr_jukebox
;
2858 new = alloc_session(&cr_ses
->fore_channel
, &cr_ses
->back_channel
);
2860 goto out_release_drc_mem
;
2861 conn
= alloc_conn_from_crses(rqstp
, cr_ses
);
2863 goto out_free_session
;
2865 spin_lock(&nn
->client_lock
);
2866 unconf
= find_unconfirmed_client(&cr_ses
->clientid
, true, nn
);
2867 conf
= find_confirmed_client(&cr_ses
->clientid
, true, nn
);
2868 WARN_ON_ONCE(conf
&& unconf
);
2871 status
= nfserr_wrong_cred
;
2872 if (!nfsd4_mach_creds_match(conf
, rqstp
))
2874 cs_slot
= &conf
->cl_cs_slot
;
2875 status
= check_slot_seqid(cr_ses
->seqid
, cs_slot
->sl_seqid
, 0);
2877 if (status
== nfserr_replay_cache
)
2878 status
= nfsd4_replay_create_session(cr_ses
, cs_slot
);
2881 } else if (unconf
) {
2882 if (!same_creds(&unconf
->cl_cred
, &rqstp
->rq_cred
) ||
2883 !rpc_cmp_addr(sa
, (struct sockaddr
*) &unconf
->cl_addr
)) {
2884 status
= nfserr_clid_inuse
;
2887 status
= nfserr_wrong_cred
;
2888 if (!nfsd4_mach_creds_match(unconf
, rqstp
))
2890 cs_slot
= &unconf
->cl_cs_slot
;
2891 status
= check_slot_seqid(cr_ses
->seqid
, cs_slot
->sl_seqid
, 0);
2893 /* an unconfirmed replay returns misordered */
2894 status
= nfserr_seq_misordered
;
2897 old
= find_confirmed_client_by_name(&unconf
->cl_name
, nn
);
2899 status
= mark_client_expired_locked(old
);
2905 move_to_confirmed(unconf
);
2908 status
= nfserr_stale_clientid
;
2912 /* Persistent sessions are not supported */
2913 cr_ses
->flags
&= ~SESSION4_PERSIST
;
2914 /* Upshifting from TCP to RDMA is not supported */
2915 cr_ses
->flags
&= ~SESSION4_RDMA
;
2917 init_session(rqstp
, new, conf
, cr_ses
);
2918 nfsd4_get_session_locked(new);
2920 memcpy(cr_ses
->sessionid
.data
, new->se_sessionid
.data
,
2921 NFS4_MAX_SESSIONID_LEN
);
2922 cs_slot
->sl_seqid
++;
2923 cr_ses
->seqid
= cs_slot
->sl_seqid
;
2925 /* cache solo and embedded create sessions under the client_lock */
2926 nfsd4_cache_create_session(cr_ses
, cs_slot
, status
);
2927 spin_unlock(&nn
->client_lock
);
2928 /* init connection and backchannel */
2929 nfsd4_init_conn(rqstp
, conn
, new);
2930 nfsd4_put_session(new);
2935 spin_unlock(&nn
->client_lock
);
2940 __free_session(new);
2941 out_release_drc_mem
:
2942 nfsd4_put_drc_mem(&cr_ses
->fore_channel
);
static __be32 nfsd4_map_bcts_dir(u32 *dir)
{
	switch (*dir) {
	case NFS4_CDFC4_FORE:
	case NFS4_CDFC4_BACK:
		return nfs_ok;
	case NFS4_CDFC4_FORE_OR_BOTH:
	case NFS4_CDFC4_BACK_OR_BOTH:
		*dir = NFS4_CDFC4_BOTH;
		return nfs_ok;
	}
	return nfserr_inval;
}
2960 __be32
nfsd4_backchannel_ctl(struct svc_rqst
*rqstp
,
2961 struct nfsd4_compound_state
*cstate
,
2962 union nfsd4_op_u
*u
)
2964 struct nfsd4_backchannel_ctl
*bc
= &u
->backchannel_ctl
;
2965 struct nfsd4_session
*session
= cstate
->session
;
2966 struct nfsd_net
*nn
= net_generic(SVC_NET(rqstp
), nfsd_net_id
);
2969 status
= nfsd4_check_cb_sec(&bc
->bc_cb_sec
);
2972 spin_lock(&nn
->client_lock
);
2973 session
->se_cb_prog
= bc
->bc_cb_program
;
2974 session
->se_cb_sec
= bc
->bc_cb_sec
;
2975 spin_unlock(&nn
->client_lock
);
2977 nfsd4_probe_callback(session
->se_client
);
2982 __be32
nfsd4_bind_conn_to_session(struct svc_rqst
*rqstp
,
2983 struct nfsd4_compound_state
*cstate
,
2984 union nfsd4_op_u
*u
)
2986 struct nfsd4_bind_conn_to_session
*bcts
= &u
->bind_conn_to_session
;
2988 struct nfsd4_conn
*conn
;
2989 struct nfsd4_session
*session
;
2990 struct net
*net
= SVC_NET(rqstp
);
2991 struct nfsd_net
*nn
= net_generic(net
, nfsd_net_id
);
2993 if (!nfsd4_last_compound_op(rqstp
))
2994 return nfserr_not_only_op
;
2995 spin_lock(&nn
->client_lock
);
2996 session
= find_in_sessionid_hashtbl(&bcts
->sessionid
, net
, &status
);
2997 spin_unlock(&nn
->client_lock
);
2999 goto out_no_session
;
3000 status
= nfserr_wrong_cred
;
3001 if (!nfsd4_mach_creds_match(session
->se_client
, rqstp
))
3003 status
= nfsd4_map_bcts_dir(&bcts
->dir
);
3006 conn
= alloc_conn(rqstp
, bcts
->dir
);
3007 status
= nfserr_jukebox
;
3010 nfsd4_init_conn(rqstp
, conn
, session
);
3013 nfsd4_put_session(session
);
static bool nfsd4_compound_in_session(struct nfsd4_compound_state *cstate, struct nfs4_sessionid *sid)
{
	if (!cstate->session)
		return false;
	return !memcmp(sid, &cstate->session->se_sessionid, sizeof(*sid));
}
3026 nfsd4_destroy_session(struct svc_rqst
*r
, struct nfsd4_compound_state
*cstate
,
3027 union nfsd4_op_u
*u
)
3029 struct nfs4_sessionid
*sessionid
= &u
->destroy_session
.sessionid
;
3030 struct nfsd4_session
*ses
;
3032 int ref_held_by_me
= 0;
3033 struct net
*net
= SVC_NET(r
);
3034 struct nfsd_net
*nn
= net_generic(net
, nfsd_net_id
);
3036 status
= nfserr_not_only_op
;
3037 if (nfsd4_compound_in_session(cstate
, sessionid
)) {
3038 if (!nfsd4_last_compound_op(r
))
3042 dump_sessionid(__func__
, sessionid
);
3043 spin_lock(&nn
->client_lock
);
3044 ses
= find_in_sessionid_hashtbl(sessionid
, net
, &status
);
3046 goto out_client_lock
;
3047 status
= nfserr_wrong_cred
;
3048 if (!nfsd4_mach_creds_match(ses
->se_client
, r
))
3049 goto out_put_session
;
3050 status
= mark_session_dead_locked(ses
, 1 + ref_held_by_me
);
3052 goto out_put_session
;
3053 unhash_session(ses
);
3054 spin_unlock(&nn
->client_lock
);
3056 nfsd4_probe_callback_sync(ses
->se_client
);
3058 spin_lock(&nn
->client_lock
);
3061 nfsd4_put_session_locked(ses
);
3063 spin_unlock(&nn
->client_lock
);
static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
{
	struct nfsd4_conn *c;

	list_for_each_entry(c, &s->se_conns, cn_persession) {
		if (c->cn_xprt == xpt) {
			return c;
		}
	}
	return NULL;
}
3080 static __be32
nfsd4_sequence_check_conn(struct nfsd4_conn
*new, struct nfsd4_session
*ses
)
3082 struct nfs4_client
*clp
= ses
->se_client
;
3083 struct nfsd4_conn
*c
;
3084 __be32 status
= nfs_ok
;
3087 spin_lock(&clp
->cl_lock
);
3088 c
= __nfsd4_find_conn(new->cn_xprt
, ses
);
3091 status
= nfserr_conn_not_bound_to_session
;
3092 if (clp
->cl_mach_cred
)
3094 __nfsd4_hash_conn(new, ses
);
3095 spin_unlock(&clp
->cl_lock
);
3096 ret
= nfsd4_register_conn(new);
3098 /* oops; xprt is already down: */
3099 nfsd4_conn_lost(&new->cn_xpt_user
);
3102 spin_unlock(&clp
->cl_lock
);
static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session)
{
	struct nfsd4_compoundargs *args = rqstp->rq_argp;

	return args->opcnt > session->se_fchannel.maxops;
}

static bool nfsd4_request_too_big(struct svc_rqst *rqstp,
				  struct nfsd4_session *session)
{
	struct xdr_buf *xb = &rqstp->rq_arg;

	return xb->len > session->se_fchannel.maxreq_sz;
}
static bool replay_matches_cache(struct svc_rqst *rqstp,
		 struct nfsd4_sequence *seq, struct nfsd4_slot *slot)
{
	struct nfsd4_compoundargs *argp = rqstp->rq_argp;

	if ((bool)(slot->sl_flags & NFSD4_SLOT_CACHETHIS) !=
	    (bool)seq->cachethis)
		return false;
	/*
	 * If there's an error then the reply can have fewer ops than
	 * the call.  But if we cached a reply with *more* ops than the
	 * call you're sending us now, then this new call is clearly not
	 * really a replay of the old one:
	 */
	if (slot->sl_opcnt < argp->opcnt)
		return false;
	/* This is the only check explicitly called by spec: */
	if (!same_creds(&rqstp->rq_cred, &slot->sl_cred))
		return false;
	/*
	 * There may be more comparisons we could actually do, but the
	 * spec doesn't require us to catch every case where the calls
	 * don't match (that would require caching the call as well as
	 * the reply), so we don't bother.
	 */
	return true;
}
3151 nfsd4_sequence(struct svc_rqst
*rqstp
, struct nfsd4_compound_state
*cstate
,
3152 union nfsd4_op_u
*u
)
3154 struct nfsd4_sequence
*seq
= &u
->sequence
;
3155 struct nfsd4_compoundres
*resp
= rqstp
->rq_resp
;
3156 struct xdr_stream
*xdr
= &resp
->xdr
;
3157 struct nfsd4_session
*session
;
3158 struct nfs4_client
*clp
;
3159 struct nfsd4_slot
*slot
;
3160 struct nfsd4_conn
*conn
;
3163 struct net
*net
= SVC_NET(rqstp
);
3164 struct nfsd_net
*nn
= net_generic(net
, nfsd_net_id
);
3166 if (resp
->opcnt
!= 1)
3167 return nfserr_sequence_pos
;
3170 * Will be either used or freed by nfsd4_sequence_check_conn
3173 conn
= alloc_conn(rqstp
, NFS4_CDFC4_FORE
);
3175 return nfserr_jukebox
;
3177 spin_lock(&nn
->client_lock
);
3178 session
= find_in_sessionid_hashtbl(&seq
->sessionid
, net
, &status
);
3180 goto out_no_session
;
3181 clp
= session
->se_client
;
3183 status
= nfserr_too_many_ops
;
3184 if (nfsd4_session_too_many_ops(rqstp
, session
))
3185 goto out_put_session
;
3187 status
= nfserr_req_too_big
;
3188 if (nfsd4_request_too_big(rqstp
, session
))
3189 goto out_put_session
;
3191 status
= nfserr_badslot
;
3192 if (seq
->slotid
>= session
->se_fchannel
.maxreqs
)
3193 goto out_put_session
;
3195 slot
= session
->se_slots
[seq
->slotid
];
3196 dprintk("%s: slotid %d\n", __func__
, seq
->slotid
);
3198 /* We do not negotiate the number of slots yet, so set the
3199 * maxslots to the session maxreqs which is used to encode
3200 * sr_highest_slotid and the sr_target_slot id to maxslots */
3201 seq
->maxslots
= session
->se_fchannel
.maxreqs
;
3203 status
= check_slot_seqid(seq
->seqid
, slot
->sl_seqid
,
3204 slot
->sl_flags
& NFSD4_SLOT_INUSE
);
3205 if (status
== nfserr_replay_cache
) {
3206 status
= nfserr_seq_misordered
;
3207 if (!(slot
->sl_flags
& NFSD4_SLOT_INITIALIZED
))
3208 goto out_put_session
;
3209 status
= nfserr_seq_false_retry
;
3210 if (!replay_matches_cache(rqstp
, seq
, slot
))
3211 goto out_put_session
;
3212 cstate
->slot
= slot
;
3213 cstate
->session
= session
;
3215 /* Return the cached reply status and set cstate->status
3216 * for nfsd4_proc_compound processing */
3217 status
= nfsd4_replay_cache_entry(resp
, seq
);
3218 cstate
->status
= nfserr_replay_cache
;
3222 goto out_put_session
;
3224 status
= nfsd4_sequence_check_conn(conn
, session
);
3227 goto out_put_session
;
3229 buflen
= (seq
->cachethis
) ?
3230 session
->se_fchannel
.maxresp_cached
:
3231 session
->se_fchannel
.maxresp_sz
;
3232 status
= (seq
->cachethis
) ? nfserr_rep_too_big_to_cache
:
3234 if (xdr_restrict_buflen(xdr
, buflen
- rqstp
->rq_auth_slack
))
3235 goto out_put_session
;
3236 svc_reserve(rqstp
, buflen
);
3239 /* Success! bump slot seqid */
3240 slot
->sl_seqid
= seq
->seqid
;
3241 slot
->sl_flags
|= NFSD4_SLOT_INUSE
;
3243 slot
->sl_flags
|= NFSD4_SLOT_CACHETHIS
;
3245 slot
->sl_flags
&= ~NFSD4_SLOT_CACHETHIS
;
3247 cstate
->slot
= slot
;
3248 cstate
->session
= session
;
3252 switch (clp
->cl_cb_state
) {
3254 seq
->status_flags
= SEQ4_STATUS_CB_PATH_DOWN
;
3256 case NFSD4_CB_FAULT
:
3257 seq
->status_flags
= SEQ4_STATUS_BACKCHANNEL_FAULT
;
3260 seq
->status_flags
= 0;
3262 if (!list_empty(&clp
->cl_revoked
))
3263 seq
->status_flags
|= SEQ4_STATUS_RECALLABLE_STATE_REVOKED
;
3267 spin_unlock(&nn
->client_lock
);
3270 nfsd4_put_session_locked(session
);
3271 goto out_no_session
;
void
nfsd4_sequence_done(struct nfsd4_compoundres *resp)
{
	struct nfsd4_compound_state *cs = &resp->cstate;

	if (nfsd4_has_session(cs)) {
		if (cs->status != nfserr_replay_cache) {
			nfsd4_store_cache_entry(resp);
			cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE;
		}
		/* Drop session reference that was taken in nfsd4_sequence() */
		nfsd4_put_session(cs->session);
	} else if (cs->clp)
		put_client_renew(cs->clp);
}
3291 nfsd4_destroy_clientid(struct svc_rqst
*rqstp
,
3292 struct nfsd4_compound_state
*cstate
,
3293 union nfsd4_op_u
*u
)
3295 struct nfsd4_destroy_clientid
*dc
= &u
->destroy_clientid
;
3296 struct nfs4_client
*conf
, *unconf
;
3297 struct nfs4_client
*clp
= NULL
;
3299 struct nfsd_net
*nn
= net_generic(SVC_NET(rqstp
), nfsd_net_id
);
3301 spin_lock(&nn
->client_lock
);
3302 unconf
= find_unconfirmed_client(&dc
->clientid
, true, nn
);
3303 conf
= find_confirmed_client(&dc
->clientid
, true, nn
);
3304 WARN_ON_ONCE(conf
&& unconf
);
3307 if (client_has_state(conf
)) {
3308 status
= nfserr_clientid_busy
;
3311 status
= mark_client_expired_locked(conf
);
3318 status
= nfserr_stale_clientid
;
3321 if (!nfsd4_mach_creds_match(clp
, rqstp
)) {
3323 status
= nfserr_wrong_cred
;
3326 unhash_client_locked(clp
);
3328 spin_unlock(&nn
->client_lock
);
3335 nfsd4_reclaim_complete(struct svc_rqst
*rqstp
,
3336 struct nfsd4_compound_state
*cstate
, union nfsd4_op_u
*u
)
3338 struct nfsd4_reclaim_complete
*rc
= &u
->reclaim_complete
;
3341 if (rc
->rca_one_fs
) {
3342 if (!cstate
->current_fh
.fh_dentry
)
3343 return nfserr_nofilehandle
;
3345 * We don't take advantage of the rca_one_fs case.
3346 * That's OK, it's optional, we can safely ignore it.
3351 status
= nfserr_complete_already
;
3352 if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE
,
3353 &cstate
->session
->se_client
->cl_flags
))
3356 status
= nfserr_stale_clientid
;
3357 if (is_client_expired(cstate
->session
->se_client
))
3359 * The following error isn't really legal.
3360 * But we only get here if the client just explicitly
3361 * destroyed the client. Surely it no longer cares what
3362 * error it gets back on an operation for the dead
3368 nfsd4_client_record_create(cstate
->session
->se_client
);
3369 inc_reclaim_complete(cstate
->session
->se_client
);
3375 nfsd4_setclientid(struct svc_rqst
*rqstp
, struct nfsd4_compound_state
*cstate
,
3376 union nfsd4_op_u
*u
)
3378 struct nfsd4_setclientid
*setclid
= &u
->setclientid
;
3379 struct xdr_netobj clname
= setclid
->se_name
;
3380 nfs4_verifier clverifier
= setclid
->se_verf
;
3381 struct nfs4_client
*conf
, *new;
3382 struct nfs4_client
*unconf
= NULL
;
3384 struct nfsd_net
*nn
= net_generic(SVC_NET(rqstp
), nfsd_net_id
);
3386 new = create_client(clname
, rqstp
, &clverifier
);
3388 return nfserr_jukebox
;
3389 /* Cases below refer to rfc 3530 section 14.2.33: */
3390 spin_lock(&nn
->client_lock
);
3391 conf
= find_confirmed_client_by_name(&clname
, nn
);
3392 if (conf
&& client_has_state(conf
)) {
3394 status
= nfserr_clid_inuse
;
3395 if (clp_used_exchangeid(conf
))
3397 if (!same_creds(&conf
->cl_cred
, &rqstp
->rq_cred
)) {
3398 char addr_str
[INET6_ADDRSTRLEN
];
3399 rpc_ntop((struct sockaddr
*) &conf
->cl_addr
, addr_str
,
3401 dprintk("NFSD: setclientid: string in use by client "
3402 "at %s\n", addr_str
);
3406 unconf
= find_unconfirmed_client_by_name(&clname
, nn
);
3408 unhash_client_locked(unconf
);
3409 if (conf
&& same_verf(&conf
->cl_verifier
, &clverifier
)) {
3410 /* case 1: probable callback update */
3411 copy_clid(new, conf
);
3412 gen_confirm(new, nn
);
3413 } else /* case 4 (new client) or cases 2, 3 (client reboot): */
3415 new->cl_minorversion
= 0;
3416 gen_callback(new, setclid
, rqstp
);
3417 add_to_unconfirmed(new);
3418 setclid
->se_clientid
.cl_boot
= new->cl_clientid
.cl_boot
;
3419 setclid
->se_clientid
.cl_id
= new->cl_clientid
.cl_id
;
3420 memcpy(setclid
->se_confirm
.data
, new->cl_confirm
.data
, sizeof(setclid
->se_confirm
.data
));
3424 spin_unlock(&nn
->client_lock
);
3428 expire_client(unconf
);
3434 nfsd4_setclientid_confirm(struct svc_rqst
*rqstp
,
3435 struct nfsd4_compound_state
*cstate
,
3436 union nfsd4_op_u
*u
)
3438 struct nfsd4_setclientid_confirm
*setclientid_confirm
=
3439 &u
->setclientid_confirm
;
3440 struct nfs4_client
*conf
, *unconf
;
3441 struct nfs4_client
*old
= NULL
;
3442 nfs4_verifier confirm
= setclientid_confirm
->sc_confirm
;
3443 clientid_t
* clid
= &setclientid_confirm
->sc_clientid
;
3445 struct nfsd_net
*nn
= net_generic(SVC_NET(rqstp
), nfsd_net_id
);
3447 if (STALE_CLIENTID(clid
, nn
))
3448 return nfserr_stale_clientid
;
3450 spin_lock(&nn
->client_lock
);
3451 conf
= find_confirmed_client(clid
, false, nn
);
3452 unconf
= find_unconfirmed_client(clid
, false, nn
);
3454 * We try hard to give out unique clientid's, so if we get an
3455 * attempt to confirm the same clientid with a different cred,
3456 * the client may be buggy; this should never happen.
3458 * Nevertheless, RFC 7530 recommends INUSE for this case:
3460 status
= nfserr_clid_inuse
;
3461 if (unconf
&& !same_creds(&unconf
->cl_cred
, &rqstp
->rq_cred
))
3463 if (conf
&& !same_creds(&conf
->cl_cred
, &rqstp
->rq_cred
))
3465 /* cases below refer to rfc 3530 section 14.2.34: */
3466 if (!unconf
|| !same_verf(&confirm
, &unconf
->cl_confirm
)) {
3467 if (conf
&& same_verf(&confirm
, &conf
->cl_confirm
)) {
3468 /* case 2: probable retransmit */
3470 } else /* case 4: client hasn't noticed we rebooted yet? */
3471 status
= nfserr_stale_clientid
;
3475 if (conf
) { /* case 1: callback update */
3477 unhash_client_locked(old
);
3478 nfsd4_change_callback(conf
, &unconf
->cl_cb_conn
);
3479 } else { /* case 3: normal case; new or rebooted client */
3480 old
= find_confirmed_client_by_name(&unconf
->cl_name
, nn
);
3482 status
= nfserr_clid_inuse
;
3483 if (client_has_state(old
)
3484 && !same_creds(&unconf
->cl_cred
,
3487 status
= mark_client_expired_locked(old
);
3493 move_to_confirmed(unconf
);
3496 get_client_locked(conf
);
3497 spin_unlock(&nn
->client_lock
);
3498 nfsd4_probe_callback(conf
);
3499 spin_lock(&nn
->client_lock
);
3500 put_client_renew_locked(conf
);
3502 spin_unlock(&nn
->client_lock
);
3508 static struct nfs4_file
*nfsd4_alloc_file(void)
3510 return kmem_cache_alloc(file_slab
, GFP_KERNEL
);
3513 /* OPEN Share state helper functions */
3514 static void nfsd4_init_file(struct knfsd_fh
*fh
, unsigned int hashval
,
3515 struct nfs4_file
*fp
)
3517 lockdep_assert_held(&state_lock
);
3519 refcount_set(&fp
->fi_ref
, 1);
3520 spin_lock_init(&fp
->fi_lock
);
3521 INIT_LIST_HEAD(&fp
->fi_stateids
);
3522 INIT_LIST_HEAD(&fp
->fi_delegations
);
3523 INIT_LIST_HEAD(&fp
->fi_clnt_odstate
);
3524 fh_copy_shallow(&fp
->fi_fhandle
, fh
);
3525 fp
->fi_deleg_file
= NULL
;
3526 fp
->fi_had_conflict
= false;
3527 fp
->fi_share_deny
= 0;
3528 memset(fp
->fi_fds
, 0, sizeof(fp
->fi_fds
));
3529 memset(fp
->fi_access
, 0, sizeof(fp
->fi_access
));
3530 #ifdef CONFIG_NFSD_PNFS
3531 INIT_LIST_HEAD(&fp
->fi_lo_states
);
3532 atomic_set(&fp
->fi_lo_recalls
, 0);
3534 hlist_add_head_rcu(&fp
->fi_hash
, &file_hashtbl
[hashval
]);
3538 nfsd4_free_slabs(void)
3540 kmem_cache_destroy(client_slab
);
3541 kmem_cache_destroy(openowner_slab
);
3542 kmem_cache_destroy(lockowner_slab
);
3543 kmem_cache_destroy(file_slab
);
3544 kmem_cache_destroy(stateid_slab
);
3545 kmem_cache_destroy(deleg_slab
);
3546 kmem_cache_destroy(odstate_slab
);
3550 nfsd4_init_slabs(void)
3552 client_slab
= kmem_cache_create("nfsd4_clients",
3553 sizeof(struct nfs4_client
), 0, 0, NULL
);
3554 if (client_slab
== NULL
)
3556 openowner_slab
= kmem_cache_create("nfsd4_openowners",
3557 sizeof(struct nfs4_openowner
), 0, 0, NULL
);
3558 if (openowner_slab
== NULL
)
3559 goto out_free_client_slab
;
3560 lockowner_slab
= kmem_cache_create("nfsd4_lockowners",
3561 sizeof(struct nfs4_lockowner
), 0, 0, NULL
);
3562 if (lockowner_slab
== NULL
)
3563 goto out_free_openowner_slab
;
3564 file_slab
= kmem_cache_create("nfsd4_files",
3565 sizeof(struct nfs4_file
), 0, 0, NULL
);
3566 if (file_slab
== NULL
)
3567 goto out_free_lockowner_slab
;
3568 stateid_slab
= kmem_cache_create("nfsd4_stateids",
3569 sizeof(struct nfs4_ol_stateid
), 0, 0, NULL
);
3570 if (stateid_slab
== NULL
)
3571 goto out_free_file_slab
;
3572 deleg_slab
= kmem_cache_create("nfsd4_delegations",
3573 sizeof(struct nfs4_delegation
), 0, 0, NULL
);
3574 if (deleg_slab
== NULL
)
3575 goto out_free_stateid_slab
;
3576 odstate_slab
= kmem_cache_create("nfsd4_odstate",
3577 sizeof(struct nfs4_clnt_odstate
), 0, 0, NULL
);
3578 if (odstate_slab
== NULL
)
3579 goto out_free_deleg_slab
;
3582 out_free_deleg_slab
:
3583 kmem_cache_destroy(deleg_slab
);
3584 out_free_stateid_slab
:
3585 kmem_cache_destroy(stateid_slab
);
3587 kmem_cache_destroy(file_slab
);
3588 out_free_lockowner_slab
:
3589 kmem_cache_destroy(lockowner_slab
);
3590 out_free_openowner_slab
:
3591 kmem_cache_destroy(openowner_slab
);
3592 out_free_client_slab
:
3593 kmem_cache_destroy(client_slab
);
3595 dprintk("nfsd4: out of memory while initializing nfsv4\n");
3599 static void init_nfs4_replay(struct nfs4_replay
*rp
)
3601 rp
->rp_status
= nfserr_serverfault
;
3603 rp
->rp_buf
= rp
->rp_ibuf
;
3604 mutex_init(&rp
->rp_mutex
);
3607 static void nfsd4_cstate_assign_replay(struct nfsd4_compound_state
*cstate
,
3608 struct nfs4_stateowner
*so
)
3610 if (!nfsd4_has_session(cstate
)) {
3611 mutex_lock(&so
->so_replay
.rp_mutex
);
3612 cstate
->replay_owner
= nfs4_get_stateowner(so
);
3616 void nfsd4_cstate_clear_replay(struct nfsd4_compound_state
*cstate
)
3618 struct nfs4_stateowner
*so
= cstate
->replay_owner
;
3621 cstate
->replay_owner
= NULL
;
3622 mutex_unlock(&so
->so_replay
.rp_mutex
);
3623 nfs4_put_stateowner(so
);
3627 static inline void *alloc_stateowner(struct kmem_cache
*slab
, struct xdr_netobj
*owner
, struct nfs4_client
*clp
)
3629 struct nfs4_stateowner
*sop
;
3631 sop
= kmem_cache_alloc(slab
, GFP_KERNEL
);
3635 sop
->so_owner
.data
= kmemdup(owner
->data
, owner
->len
, GFP_KERNEL
);
3636 if (!sop
->so_owner
.data
) {
3637 kmem_cache_free(slab
, sop
);
3640 sop
->so_owner
.len
= owner
->len
;
3642 INIT_LIST_HEAD(&sop
->so_stateids
);
3643 sop
->so_client
= clp
;
3644 init_nfs4_replay(&sop
->so_replay
);
3645 atomic_set(&sop
->so_count
, 1);
3649 static void hash_openowner(struct nfs4_openowner
*oo
, struct nfs4_client
*clp
, unsigned int strhashval
)
3651 lockdep_assert_held(&clp
->cl_lock
);
3653 list_add(&oo
->oo_owner
.so_strhash
,
3654 &clp
->cl_ownerstr_hashtbl
[strhashval
]);
3655 list_add(&oo
->oo_perclient
, &clp
->cl_openowners
);
3658 static void nfs4_unhash_openowner(struct nfs4_stateowner
*so
)
3660 unhash_openowner_locked(openowner(so
));
3663 static void nfs4_free_openowner(struct nfs4_stateowner
*so
)
3665 struct nfs4_openowner
*oo
= openowner(so
);
3667 kmem_cache_free(openowner_slab
, oo
);
3670 static const struct nfs4_stateowner_operations openowner_ops
= {
3671 .so_unhash
= nfs4_unhash_openowner
,
3672 .so_free
= nfs4_free_openowner
,
3675 static struct nfs4_ol_stateid
*
3676 nfsd4_find_existing_open(struct nfs4_file
*fp
, struct nfsd4_open
*open
)
3678 struct nfs4_ol_stateid
*local
, *ret
= NULL
;
3679 struct nfs4_openowner
*oo
= open
->op_openowner
;
3681 lockdep_assert_held(&fp
->fi_lock
);
3683 list_for_each_entry(local
, &fp
->fi_stateids
, st_perfile
) {
3684 /* ignore lock owners */
3685 if (local
->st_stateowner
->so_is_open_owner
== 0)
3687 if (local
->st_stateowner
!= &oo
->oo_owner
)
3689 if (local
->st_stid
.sc_type
== NFS4_OPEN_STID
) {
3691 refcount_inc(&ret
->st_stid
.sc_count
);
3699 nfsd4_verify_open_stid(struct nfs4_stid
*s
)
3701 __be32 ret
= nfs_ok
;
3703 switch (s
->sc_type
) {
3707 case NFS4_CLOSED_STID
:
3708 case NFS4_CLOSED_DELEG_STID
:
3709 ret
= nfserr_bad_stateid
;
3711 case NFS4_REVOKED_DELEG_STID
:
3712 ret
= nfserr_deleg_revoked
;
3717 /* Lock the stateid st_mutex, and deal with races with CLOSE */
3719 nfsd4_lock_ol_stateid(struct nfs4_ol_stateid
*stp
)
3723 mutex_lock_nested(&stp
->st_mutex
, LOCK_STATEID_MUTEX
);
3724 ret
= nfsd4_verify_open_stid(&stp
->st_stid
);
3726 mutex_unlock(&stp
->st_mutex
);
3730 static struct nfs4_ol_stateid
*
3731 nfsd4_find_and_lock_existing_open(struct nfs4_file
*fp
, struct nfsd4_open
*open
)
3733 struct nfs4_ol_stateid
*stp
;
3735 spin_lock(&fp
->fi_lock
);
3736 stp
= nfsd4_find_existing_open(fp
, open
);
3737 spin_unlock(&fp
->fi_lock
);
3738 if (!stp
|| nfsd4_lock_ol_stateid(stp
) == nfs_ok
)
3740 nfs4_put_stid(&stp
->st_stid
);
3745 static struct nfs4_openowner
*
3746 alloc_init_open_stateowner(unsigned int strhashval
, struct nfsd4_open
*open
,
3747 struct nfsd4_compound_state
*cstate
)
3749 struct nfs4_client
*clp
= cstate
->clp
;
3750 struct nfs4_openowner
*oo
, *ret
;
3752 oo
= alloc_stateowner(openowner_slab
, &open
->op_owner
, clp
);
3755 oo
->oo_owner
.so_ops
= &openowner_ops
;
3756 oo
->oo_owner
.so_is_open_owner
= 1;
3757 oo
->oo_owner
.so_seqid
= open
->op_seqid
;
3759 if (nfsd4_has_session(cstate
))
3760 oo
->oo_flags
|= NFS4_OO_CONFIRMED
;
3762 oo
->oo_last_closed_stid
= NULL
;
3763 INIT_LIST_HEAD(&oo
->oo_close_lru
);
3764 spin_lock(&clp
->cl_lock
);
3765 ret
= find_openstateowner_str_locked(strhashval
, open
, clp
);
3767 hash_openowner(oo
, clp
, strhashval
);
3770 nfs4_free_stateowner(&oo
->oo_owner
);
3772 spin_unlock(&clp
->cl_lock
);
3776 static struct nfs4_ol_stateid
*
3777 init_open_stateid(struct nfs4_file
*fp
, struct nfsd4_open
*open
)
3780 struct nfs4_openowner
*oo
= open
->op_openowner
;
3781 struct nfs4_ol_stateid
*retstp
= NULL
;
3782 struct nfs4_ol_stateid
*stp
;
3785 /* We are moving these outside of the spinlocks to avoid the warnings */
3786 mutex_init(&stp
->st_mutex
);
3787 mutex_lock_nested(&stp
->st_mutex
, OPEN_STATEID_MUTEX
);
3790 spin_lock(&oo
->oo_owner
.so_client
->cl_lock
);
3791 spin_lock(&fp
->fi_lock
);
3793 retstp
= nfsd4_find_existing_open(fp
, open
);
3797 open
->op_stp
= NULL
;
3798 refcount_inc(&stp
->st_stid
.sc_count
);
3799 stp
->st_stid
.sc_type
= NFS4_OPEN_STID
;
3800 INIT_LIST_HEAD(&stp
->st_locks
);
3801 stp
->st_stateowner
= nfs4_get_stateowner(&oo
->oo_owner
);
3803 stp
->st_stid
.sc_file
= fp
;
3804 stp
->st_access_bmap
= 0;
3805 stp
->st_deny_bmap
= 0;
3806 stp
->st_openstp
= NULL
;
3807 list_add(&stp
->st_perstateowner
, &oo
->oo_owner
.so_stateids
);
3808 list_add(&stp
->st_perfile
, &fp
->fi_stateids
);
3811 spin_unlock(&fp
->fi_lock
);
3812 spin_unlock(&oo
->oo_owner
.so_client
->cl_lock
);
3814 /* Handle races with CLOSE */
3815 if (nfsd4_lock_ol_stateid(retstp
) != nfs_ok
) {
3816 nfs4_put_stid(&retstp
->st_stid
);
3819 /* To keep mutex tracking happy */
3820 mutex_unlock(&stp
->st_mutex
);
3827 * In the 4.0 case we need to keep the owners around a little while to handle
3828 * CLOSE replay. We still do need to release any file access that is held by
3829 * them before returning however.
3832 move_to_close_lru(struct nfs4_ol_stateid
*s
, struct net
*net
)
3834 struct nfs4_ol_stateid
*last
;
3835 struct nfs4_openowner
*oo
= openowner(s
->st_stateowner
);
3836 struct nfsd_net
*nn
= net_generic(s
->st_stid
.sc_client
->net
,
3839 dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo
);
3842 * We know that we hold one reference via nfsd4_close, and another
3843 * "persistent" reference for the client. If the refcount is higher
3844 * than 2, then there are still calls in progress that are using this
3845 * stateid. We can't put the sc_file reference until they are finished.
3846 * Wait for the refcount to drop to 2. Since it has been unhashed,
3847 * there should be no danger of the refcount going back up again at
3850 wait_event(close_wq
, refcount_read(&s
->st_stid
.sc_count
) == 2);
3852 release_all_access(s
);
3853 if (s
->st_stid
.sc_file
) {
3854 put_nfs4_file(s
->st_stid
.sc_file
);
3855 s
->st_stid
.sc_file
= NULL
;
3858 spin_lock(&nn
->client_lock
);
3859 last
= oo
->oo_last_closed_stid
;
3860 oo
->oo_last_closed_stid
= s
;
3861 list_move_tail(&oo
->oo_close_lru
, &nn
->close_lru
);
3862 oo
->oo_time
= get_seconds();
3863 spin_unlock(&nn
->client_lock
);
3865 nfs4_put_stid(&last
->st_stid
);
3868 /* search file_hashtbl[] for file */
3869 static struct nfs4_file
*
3870 find_file_locked(struct knfsd_fh
*fh
, unsigned int hashval
)
3872 struct nfs4_file
*fp
;
3874 hlist_for_each_entry_rcu(fp
, &file_hashtbl
[hashval
], fi_hash
) {
3875 if (fh_match(&fp
->fi_fhandle
, fh
)) {
3876 if (refcount_inc_not_zero(&fp
->fi_ref
))
3884 find_file(struct knfsd_fh
*fh
)
3886 struct nfs4_file
*fp
;
3887 unsigned int hashval
= file_hashval(fh
);
3890 fp
= find_file_locked(fh
, hashval
);
3895 static struct nfs4_file
*
3896 find_or_add_file(struct nfs4_file
*new, struct knfsd_fh
*fh
)
3898 struct nfs4_file
*fp
;
3899 unsigned int hashval
= file_hashval(fh
);
3902 fp
= find_file_locked(fh
, hashval
);
3907 spin_lock(&state_lock
);
3908 fp
= find_file_locked(fh
, hashval
);
3909 if (likely(fp
== NULL
)) {
3910 nfsd4_init_file(fh
, hashval
, new);
3913 spin_unlock(&state_lock
);
3919 * Called to check deny when READ with all zero stateid or
3920 * WRITE with all zero or all one stateid
3923 nfs4_share_conflict(struct svc_fh
*current_fh
, unsigned int deny_type
)
3925 struct nfs4_file
*fp
;
3926 __be32 ret
= nfs_ok
;
3928 fp
= find_file(¤t_fh
->fh_handle
);
3931 /* Check for conflicting share reservations */
3932 spin_lock(&fp
->fi_lock
);
3933 if (fp
->fi_share_deny
& deny_type
)
3934 ret
= nfserr_locked
;
3935 spin_unlock(&fp
->fi_lock
);
3940 static void nfsd4_cb_recall_prepare(struct nfsd4_callback
*cb
)
3942 struct nfs4_delegation
*dp
= cb_to_delegation(cb
);
3943 struct nfsd_net
*nn
= net_generic(dp
->dl_stid
.sc_client
->net
,
3946 block_delegations(&dp
->dl_stid
.sc_file
->fi_fhandle
);
3949 * We can't do this in nfsd_break_deleg_cb because it is
3950 * already holding inode->i_lock.
3952 * If the dl_time != 0, then we know that it has already been
3953 * queued for a lease break. Don't queue it again.
3955 spin_lock(&state_lock
);
3956 if (dp
->dl_time
== 0) {
3957 dp
->dl_time
= get_seconds();
3958 list_add_tail(&dp
->dl_recall_lru
, &nn
->del_recall_lru
);
3960 spin_unlock(&state_lock
);
3963 static int nfsd4_cb_recall_done(struct nfsd4_callback
*cb
,
3964 struct rpc_task
*task
)
3966 struct nfs4_delegation
*dp
= cb_to_delegation(cb
);
3968 if (dp
->dl_stid
.sc_type
== NFS4_CLOSED_DELEG_STID
)
3971 switch (task
->tk_status
) {
3974 case -NFS4ERR_DELAY
:
3975 rpc_delay(task
, 2 * HZ
);
3978 case -NFS4ERR_BAD_STATEID
:
3980 * Race: client probably got cb_recall before open reply
3981 * granting delegation.
3983 if (dp
->dl_retries
--) {
3984 rpc_delay(task
, 2 * HZ
);
3993 static void nfsd4_cb_recall_release(struct nfsd4_callback
*cb
)
3995 struct nfs4_delegation
*dp
= cb_to_delegation(cb
);
3997 nfs4_put_stid(&dp
->dl_stid
);
4000 static const struct nfsd4_callback_ops nfsd4_cb_recall_ops
= {
4001 .prepare
= nfsd4_cb_recall_prepare
,
4002 .done
= nfsd4_cb_recall_done
,
4003 .release
= nfsd4_cb_recall_release
,
4006 static void nfsd_break_one_deleg(struct nfs4_delegation
*dp
)
4009 * We're assuming the state code never drops its reference
4010 * without first removing the lease. Since we're in this lease
4011 * callback (and since the lease code is serialized by the
4012 * i_lock) we know the server hasn't removed the lease yet, and
4013 * we know it's safe to take a reference.
4015 refcount_inc(&dp
->dl_stid
.sc_count
);
4016 nfsd4_run_cb(&dp
->dl_recall
);
4019 /* Called from break_lease() with i_lock held. */
4021 nfsd_break_deleg_cb(struct file_lock
*fl
)
4024 struct nfs4_delegation
*dp
= (struct nfs4_delegation
*)fl
->fl_owner
;
4025 struct nfs4_file
*fp
= dp
->dl_stid
.sc_file
;
4028 * We don't want the locks code to timeout the lease for us;
4029 * we'll remove it ourself if a delegation isn't returned
4032 fl
->fl_break_time
= 0;
4034 spin_lock(&fp
->fi_lock
);
4035 fp
->fi_had_conflict
= true;
4036 nfsd_break_one_deleg(dp
);
4037 spin_unlock(&fp
->fi_lock
);
4042 nfsd_change_deleg_cb(struct file_lock
*onlist
, int arg
,
4043 struct list_head
*dispose
)
4046 return lease_modify(onlist
, arg
, dispose
);
4051 static const struct lock_manager_operations nfsd_lease_mng_ops
= {
4052 .lm_break
= nfsd_break_deleg_cb
,
4053 .lm_change
= nfsd_change_deleg_cb
,
4056 static __be32
nfsd4_check_seqid(struct nfsd4_compound_state
*cstate
, struct nfs4_stateowner
*so
, u32 seqid
)
4058 if (nfsd4_has_session(cstate
))
4060 if (seqid
== so
->so_seqid
- 1)
4061 return nfserr_replay_me
;
4062 if (seqid
== so
->so_seqid
)
4064 return nfserr_bad_seqid
;
4067 static __be32
lookup_clientid(clientid_t
*clid
,
4068 struct nfsd4_compound_state
*cstate
,
4069 struct nfsd_net
*nn
)
4071 struct nfs4_client
*found
;
4074 found
= cstate
->clp
;
4075 if (!same_clid(&found
->cl_clientid
, clid
))
4076 return nfserr_stale_clientid
;
4080 if (STALE_CLIENTID(clid
, nn
))
4081 return nfserr_stale_clientid
;
4084 * For v4.1+ we get the client in the SEQUENCE op. If we don't have one
4085 * cached already then we know this is for is for v4.0 and "sessions"
4088 WARN_ON_ONCE(cstate
->session
);
4089 spin_lock(&nn
->client_lock
);
4090 found
= find_confirmed_client(clid
, false, nn
);
4092 spin_unlock(&nn
->client_lock
);
4093 return nfserr_expired
;
4095 atomic_inc(&found
->cl_refcount
);
4096 spin_unlock(&nn
->client_lock
);
4098 /* Cache the nfs4_client in cstate! */
4099 cstate
->clp
= found
;
4104 nfsd4_process_open1(struct nfsd4_compound_state
*cstate
,
4105 struct nfsd4_open
*open
, struct nfsd_net
*nn
)
4107 clientid_t
*clientid
= &open
->op_clientid
;
4108 struct nfs4_client
*clp
= NULL
;
4109 unsigned int strhashval
;
4110 struct nfs4_openowner
*oo
= NULL
;
4113 if (STALE_CLIENTID(&open
->op_clientid
, nn
))
4114 return nfserr_stale_clientid
;
4116 * In case we need it later, after we've already created the
4117 * file and don't want to risk a further failure:
4119 open
->op_file
= nfsd4_alloc_file();
4120 if (open
->op_file
== NULL
)
4121 return nfserr_jukebox
;
4123 status
= lookup_clientid(clientid
, cstate
, nn
);
4128 strhashval
= ownerstr_hashval(&open
->op_owner
);
4129 oo
= find_openstateowner_str(strhashval
, open
, clp
);
4130 open
->op_openowner
= oo
;
4134 if (!(oo
->oo_flags
& NFS4_OO_CONFIRMED
)) {
4135 /* Replace unconfirmed owners without checking for replay. */
4136 release_openowner(oo
);
4137 open
->op_openowner
= NULL
;
4140 status
= nfsd4_check_seqid(cstate
, &oo
->oo_owner
, open
->op_seqid
);
4145 oo
= alloc_init_open_stateowner(strhashval
, open
, cstate
);
4147 return nfserr_jukebox
;
4148 open
->op_openowner
= oo
;
4150 open
->op_stp
= nfs4_alloc_open_stateid(clp
);
4152 return nfserr_jukebox
;
4154 if (nfsd4_has_session(cstate
) &&
4155 (cstate
->current_fh
.fh_export
->ex_flags
& NFSEXP_PNFS
)) {
4156 open
->op_odstate
= alloc_clnt_odstate(clp
);
4157 if (!open
->op_odstate
)
4158 return nfserr_jukebox
;
4164 static inline __be32
4165 nfs4_check_delegmode(struct nfs4_delegation
*dp
, int flags
)
4167 if ((flags
& WR_STATE
) && (dp
->dl_type
== NFS4_OPEN_DELEGATE_READ
))
4168 return nfserr_openmode
;
4173 static int share_access_to_flags(u32 share_access
)
4175 return share_access
== NFS4_SHARE_ACCESS_READ
? RD_STATE
: WR_STATE
;
4178 static struct nfs4_delegation
*find_deleg_stateid(struct nfs4_client
*cl
, stateid_t
*s
)
4180 struct nfs4_stid
*ret
;
4182 ret
= find_stateid_by_type(cl
, s
,
4183 NFS4_DELEG_STID
|NFS4_REVOKED_DELEG_STID
);
4186 return delegstateid(ret
);
4189 static bool nfsd4_is_deleg_cur(struct nfsd4_open
*open
)
4191 return open
->op_claim_type
== NFS4_OPEN_CLAIM_DELEGATE_CUR
||
4192 open
->op_claim_type
== NFS4_OPEN_CLAIM_DELEG_CUR_FH
;
4196 nfs4_check_deleg(struct nfs4_client
*cl
, struct nfsd4_open
*open
,
4197 struct nfs4_delegation
**dp
)
4200 __be32 status
= nfserr_bad_stateid
;
4201 struct nfs4_delegation
*deleg
;
4203 deleg
= find_deleg_stateid(cl
, &open
->op_delegate_stateid
);
4206 if (deleg
->dl_stid
.sc_type
== NFS4_REVOKED_DELEG_STID
) {
4207 nfs4_put_stid(&deleg
->dl_stid
);
4208 if (cl
->cl_minorversion
)
4209 status
= nfserr_deleg_revoked
;
4212 flags
= share_access_to_flags(open
->op_share_access
);
4213 status
= nfs4_check_delegmode(deleg
, flags
);
4215 nfs4_put_stid(&deleg
->dl_stid
);
4220 if (!nfsd4_is_deleg_cur(open
))
4224 open
->op_openowner
->oo_flags
|= NFS4_OO_CONFIRMED
;
4228 static inline int nfs4_access_to_access(u32 nfs4_access
)
4232 if (nfs4_access
& NFS4_SHARE_ACCESS_READ
)
4233 flags
|= NFSD_MAY_READ
;
4234 if (nfs4_access
& NFS4_SHARE_ACCESS_WRITE
)
4235 flags
|= NFSD_MAY_WRITE
;
4239 static inline __be32
4240 nfsd4_truncate(struct svc_rqst
*rqstp
, struct svc_fh
*fh
,
4241 struct nfsd4_open
*open
)
4243 struct iattr iattr
= {
4244 .ia_valid
= ATTR_SIZE
,
4247 if (!open
->op_truncate
)
4249 if (!(open
->op_share_access
& NFS4_SHARE_ACCESS_WRITE
))
4250 return nfserr_inval
;
4251 return nfsd_setattr(rqstp
, fh
, &iattr
, 0, (time_t)0);
4254 static __be32
nfs4_get_vfs_file(struct svc_rqst
*rqstp
, struct nfs4_file
*fp
,
4255 struct svc_fh
*cur_fh
, struct nfs4_ol_stateid
*stp
,
4256 struct nfsd4_open
*open
)
4258 struct file
*filp
= NULL
;
4260 int oflag
= nfs4_access_to_omode(open
->op_share_access
);
4261 int access
= nfs4_access_to_access(open
->op_share_access
);
4262 unsigned char old_access_bmap
, old_deny_bmap
;
4264 spin_lock(&fp
->fi_lock
);
4267 * Are we trying to set a deny mode that would conflict with
4270 status
= nfs4_file_check_deny(fp
, open
->op_share_deny
);
4271 if (status
!= nfs_ok
) {
4272 spin_unlock(&fp
->fi_lock
);
4276 /* set access to the file */
4277 status
= nfs4_file_get_access(fp
, open
->op_share_access
);
4278 if (status
!= nfs_ok
) {
4279 spin_unlock(&fp
->fi_lock
);
4283 /* Set access bits in stateid */
4284 old_access_bmap
= stp
->st_access_bmap
;
4285 set_access(open
->op_share_access
, stp
);
4287 /* Set new deny mask */
4288 old_deny_bmap
= stp
->st_deny_bmap
;
4289 set_deny(open
->op_share_deny
, stp
);
4290 fp
->fi_share_deny
|= (open
->op_share_deny
& NFS4_SHARE_DENY_BOTH
);
4292 if (!fp
->fi_fds
[oflag
]) {
4293 spin_unlock(&fp
->fi_lock
);
4294 status
= nfsd_open(rqstp
, cur_fh
, S_IFREG
, access
, &filp
);
4296 goto out_put_access
;
4297 spin_lock(&fp
->fi_lock
);
4298 if (!fp
->fi_fds
[oflag
]) {
4299 fp
->fi_fds
[oflag
] = filp
;
4303 spin_unlock(&fp
->fi_lock
);
4307 status
= nfsd4_truncate(rqstp
, cur_fh
, open
);
4309 goto out_put_access
;
4313 stp
->st_access_bmap
= old_access_bmap
;
4314 nfs4_file_put_access(fp
, open
->op_share_access
);
4315 reset_union_bmap_deny(bmap_to_share_mode(old_deny_bmap
), stp
);
4320 nfs4_upgrade_open(struct svc_rqst
*rqstp
, struct nfs4_file
*fp
, struct svc_fh
*cur_fh
, struct nfs4_ol_stateid
*stp
, struct nfsd4_open
*open
)
4323 unsigned char old_deny_bmap
= stp
->st_deny_bmap
;
4325 if (!test_access(open
->op_share_access
, stp
))
4326 return nfs4_get_vfs_file(rqstp
, fp
, cur_fh
, stp
, open
);
4328 /* test and set deny mode */
4329 spin_lock(&fp
->fi_lock
);
4330 status
= nfs4_file_check_deny(fp
, open
->op_share_deny
);
4331 if (status
== nfs_ok
) {
4332 set_deny(open
->op_share_deny
, stp
);
4333 fp
->fi_share_deny
|=
4334 (open
->op_share_deny
& NFS4_SHARE_DENY_BOTH
);
4336 spin_unlock(&fp
->fi_lock
);
4338 if (status
!= nfs_ok
)
4341 status
= nfsd4_truncate(rqstp
, cur_fh
, open
);
4342 if (status
!= nfs_ok
)
4343 reset_union_bmap_deny(old_deny_bmap
, stp
);
4347 /* Should we give out recallable state?: */
4348 static bool nfsd4_cb_channel_good(struct nfs4_client
*clp
)
4350 if (clp
->cl_cb_state
== NFSD4_CB_UP
)
4353 * In the sessions case, since we don't have to establish a
4354 * separate connection for callbacks, we assume it's OK
4355 * until we hear otherwise:
4357 return clp
->cl_minorversion
&& clp
->cl_cb_state
== NFSD4_CB_UNKNOWN
;
4360 static struct file_lock
*nfs4_alloc_init_lease(struct nfs4_delegation
*dp
,
4363 struct file_lock
*fl
;
4365 fl
= locks_alloc_lock();
4368 fl
->fl_lmops
= &nfsd_lease_mng_ops
;
4369 fl
->fl_flags
= FL_DELEG
;
4370 fl
->fl_type
= flag
== NFS4_OPEN_DELEGATE_READ
? F_RDLCK
: F_WRLCK
;
4371 fl
->fl_end
= OFFSET_MAX
;
4372 fl
->fl_owner
= (fl_owner_t
)dp
;
4373 fl
->fl_pid
= current
->tgid
;
4374 fl
->fl_file
= dp
->dl_stid
.sc_file
->fi_deleg_file
;
4378 static struct nfs4_delegation
*
4379 nfs4_set_delegation(struct nfs4_client
*clp
, struct svc_fh
*fh
,
4380 struct nfs4_file
*fp
, struct nfs4_clnt_odstate
*odstate
)
4383 struct nfs4_delegation
*dp
;
4385 struct file_lock
*fl
;
4388 * The fi_had_conflict and nfs_get_existing_delegation checks
4389 * here are just optimizations; we'll need to recheck them at
4392 if (fp
->fi_had_conflict
)
4393 return ERR_PTR(-EAGAIN
);
4395 filp
= find_readable_file(fp
);
4397 /* We should always have a readable file here */
4399 return ERR_PTR(-EBADF
);
4401 spin_lock(&state_lock
);
4402 spin_lock(&fp
->fi_lock
);
4403 if (nfs4_delegation_exists(clp
, fp
))
4405 else if (!fp
->fi_deleg_file
) {
4406 fp
->fi_deleg_file
= filp
;
4407 /* increment early to prevent fi_deleg_file from being
4409 fp
->fi_delegees
= 1;
4413 spin_unlock(&fp
->fi_lock
);
4414 spin_unlock(&state_lock
);
4418 return ERR_PTR(status
);
4421 dp
= alloc_init_deleg(clp
, fp
, fh
, odstate
);
4425 fl
= nfs4_alloc_init_lease(dp
, NFS4_OPEN_DELEGATE_READ
);
4427 goto out_clnt_odstate
;
4429 status
= vfs_setlease(fp
->fi_deleg_file
, fl
->fl_type
, &fl
, NULL
);
4431 locks_free_lock(fl
);
4433 goto out_clnt_odstate
;
4435 spin_lock(&state_lock
);
4436 spin_lock(&fp
->fi_lock
);
4437 if (fp
->fi_had_conflict
)
4440 status
= hash_delegation_locked(dp
, fp
);
4441 spin_unlock(&fp
->fi_lock
);
4442 spin_unlock(&state_lock
);
4449 vfs_setlease(fp
->fi_deleg_file
, F_UNLCK
, NULL
, (void **)&dp
);
4451 put_clnt_odstate(dp
->dl_clnt_odstate
);
4452 nfs4_put_stid(&dp
->dl_stid
);
4455 return ERR_PTR(status
);
static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
{
	open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
	if (status == -EAGAIN)
		open->op_why_no_deleg = WND4_CONTENTION;
	else {
		open->op_why_no_deleg = WND4_RESOURCE;
		switch (open->op_deleg_want) {
		case NFS4_SHARE_WANT_READ_DELEG:
		case NFS4_SHARE_WANT_WRITE_DELEG:
		case NFS4_SHARE_WANT_ANY_DELEG:
			break;
		case NFS4_SHARE_WANT_CANCEL:
			open->op_why_no_deleg = WND4_CANCELLED;
			break;
		case NFS4_SHARE_WANT_NO_DELEG:
			WARN_ON_ONCE(1);
		}
	}
}
4480 * Attempt to hand out a delegation.
4482 * Note we don't support write delegations, and won't until the vfs has
4483 * proper support for them.
4486 nfs4_open_delegation(struct svc_fh
*fh
, struct nfsd4_open
*open
,
4487 struct nfs4_ol_stateid
*stp
)
4489 struct nfs4_delegation
*dp
;
4490 struct nfs4_openowner
*oo
= openowner(stp
->st_stateowner
);
4491 struct nfs4_client
*clp
= stp
->st_stid
.sc_client
;
4495 cb_up
= nfsd4_cb_channel_good(oo
->oo_owner
.so_client
);
4496 open
->op_recall
= 0;
4497 switch (open
->op_claim_type
) {
4498 case NFS4_OPEN_CLAIM_PREVIOUS
:
4500 open
->op_recall
= 1;
4501 if (open
->op_delegate_type
!= NFS4_OPEN_DELEGATE_READ
)
4504 case NFS4_OPEN_CLAIM_NULL
:
4505 case NFS4_OPEN_CLAIM_FH
:
4507 * Let's not give out any delegations till everyone's
4508 * had the chance to reclaim theirs, *and* until
4509 * NLM locks have all been reclaimed:
4511 if (locks_in_grace(clp
->net
))
4513 if (!cb_up
|| !(oo
->oo_flags
& NFS4_OO_CONFIRMED
))
4516 * Also, if the file was opened for write or
4517 * create, there's a good chance the client's
4518 * about to write to it, resulting in an
4519 * immediate recall (since we don't support
4520 * write delegations):
4522 if (open
->op_share_access
& NFS4_SHARE_ACCESS_WRITE
)
4524 if (open
->op_create
== NFS4_OPEN_CREATE
)
4530 dp
= nfs4_set_delegation(clp
, fh
, stp
->st_stid
.sc_file
, stp
->st_clnt_odstate
);
4534 memcpy(&open
->op_delegate_stateid
, &dp
->dl_stid
.sc_stateid
, sizeof(dp
->dl_stid
.sc_stateid
));
4536 dprintk("NFSD: delegation stateid=" STATEID_FMT
"\n",
4537 STATEID_VAL(&dp
->dl_stid
.sc_stateid
));
4538 open
->op_delegate_type
= NFS4_OPEN_DELEGATE_READ
;
4539 nfs4_put_stid(&dp
->dl_stid
);
4542 open
->op_delegate_type
= NFS4_OPEN_DELEGATE_NONE
;
4543 if (open
->op_claim_type
== NFS4_OPEN_CLAIM_PREVIOUS
&&
4544 open
->op_delegate_type
!= NFS4_OPEN_DELEGATE_NONE
) {
4545 dprintk("NFSD: WARNING: refusing delegation reclaim\n");
4546 open
->op_recall
= 1;
4549 /* 4.1 client asking for a delegation? */
4550 if (open
->op_deleg_want
)
4551 nfsd4_open_deleg_none_ext(open
, status
);
static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open,
					struct nfs4_delegation *dp)
{
	if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG &&
	    dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
		open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
		open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE;
	} else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG &&
		   dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
		open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
		open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE;
	}
	/* Otherwise the client must be confused wanting a delegation
	 * it already has, therefore we don't return
	 * NFS4_OPEN_DELEGATE_NONE_EXT and reason.
	 */
}
4574 nfsd4_process_open2(struct svc_rqst
*rqstp
, struct svc_fh
*current_fh
, struct nfsd4_open
*open
)
4576 struct nfsd4_compoundres
*resp
= rqstp
->rq_resp
;
4577 struct nfs4_client
*cl
= open
->op_openowner
->oo_owner
.so_client
;
4578 struct nfs4_file
*fp
= NULL
;
4579 struct nfs4_ol_stateid
*stp
= NULL
;
4580 struct nfs4_delegation
*dp
= NULL
;
4582 bool new_stp
= false;
4585 * Lookup file; if found, lookup stateid and check open request,
4586 * and check for delegations in the process of being recalled.
4587 * If not found, create the nfs4_file struct
4589 fp
= find_or_add_file(open
->op_file
, ¤t_fh
->fh_handle
);
4590 if (fp
!= open
->op_file
) {
4591 status
= nfs4_check_deleg(cl
, open
, &dp
);
4594 stp
= nfsd4_find_and_lock_existing_open(fp
, open
);
4596 open
->op_file
= NULL
;
4597 status
= nfserr_bad_stateid
;
4598 if (nfsd4_is_deleg_cur(open
))
4603 stp
= init_open_stateid(fp
, open
);
4609 * OPEN the file, or upgrade an existing OPEN.
4610 * If truncate fails, the OPEN fails.
4612 * stp is already locked.
4615 /* Stateid was found, this is an OPEN upgrade */
4616 status
= nfs4_upgrade_open(rqstp
, fp
, current_fh
, stp
, open
);
4618 mutex_unlock(&stp
->st_mutex
);
4622 status
= nfs4_get_vfs_file(rqstp
, fp
, current_fh
, stp
, open
);
4624 stp
->st_stid
.sc_type
= NFS4_CLOSED_STID
;
4625 release_open_stateid(stp
);
4626 mutex_unlock(&stp
->st_mutex
);
4630 stp
->st_clnt_odstate
= find_or_hash_clnt_odstate(fp
,
4632 if (stp
->st_clnt_odstate
== open
->op_odstate
)
4633 open
->op_odstate
= NULL
;
4636 nfs4_inc_and_copy_stateid(&open
->op_stateid
, &stp
->st_stid
);
4637 mutex_unlock(&stp
->st_mutex
);
4639 if (nfsd4_has_session(&resp
->cstate
)) {
4640 if (open
->op_deleg_want
& NFS4_SHARE_WANT_NO_DELEG
) {
4641 open
->op_delegate_type
= NFS4_OPEN_DELEGATE_NONE_EXT
;
4642 open
->op_why_no_deleg
= WND4_NOT_WANTED
;
4648 * Attempt to hand out a delegation. No error return, because the
4649 * OPEN succeeds even if we fail.
4651 nfs4_open_delegation(current_fh
, open
, stp
);
4655 dprintk("%s: stateid=" STATEID_FMT
"\n", __func__
,
4656 STATEID_VAL(&stp
->st_stid
.sc_stateid
));
4658 /* 4.1 client trying to upgrade/downgrade delegation? */
4659 if (open
->op_delegate_type
== NFS4_OPEN_DELEGATE_NONE
&& dp
&&
4660 open
->op_deleg_want
)
4661 nfsd4_deleg_xgrade_none_ext(open
, dp
);
4665 if (status
== 0 && open
->op_claim_type
== NFS4_OPEN_CLAIM_PREVIOUS
)
4666 open
->op_openowner
->oo_flags
|= NFS4_OO_CONFIRMED
;
4668 * To finish the open response, we just need to set the rflags.
4670 open
->op_rflags
= NFS4_OPEN_RESULT_LOCKTYPE_POSIX
;
4671 if (nfsd4_has_session(&resp
->cstate
))
4672 open
->op_rflags
|= NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK
;
4673 else if (!(open
->op_openowner
->oo_flags
& NFS4_OO_CONFIRMED
))
4674 open
->op_rflags
|= NFS4_OPEN_RESULT_CONFIRM
;
4677 nfs4_put_stid(&dp
->dl_stid
);
4679 nfs4_put_stid(&stp
->st_stid
);
4684 void nfsd4_cleanup_open_state(struct nfsd4_compound_state
*cstate
,
4685 struct nfsd4_open
*open
)
4687 if (open
->op_openowner
) {
4688 struct nfs4_stateowner
*so
= &open
->op_openowner
->oo_owner
;
4690 nfsd4_cstate_assign_replay(cstate
, so
);
4691 nfs4_put_stateowner(so
);
4694 kmem_cache_free(file_slab
, open
->op_file
);
4696 nfs4_put_stid(&open
->op_stp
->st_stid
);
4697 if (open
->op_odstate
)
4698 kmem_cache_free(odstate_slab
, open
->op_odstate
);
__be32
nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	    union nfsd4_op_u *u)
{
	clientid_t *clid = &u->renew;
	struct nfs4_client *clp;
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	dprintk("process_renew(%08x/%08x): starting\n",
			clid->cl_boot, clid->cl_id);
	status = lookup_clientid(clid, cstate, nn);
	if (status)
		goto out;
	clp = cstate->clp;
	status = nfserr_cb_path_down;
	if (!list_empty(&clp->cl_delegations)
			&& clp->cl_cb_state != NFSD4_CB_UP)
		goto out;
	status = nfs_ok;
out:
	return status;
}
void
nfsd4_end_grace(struct nfsd_net *nn)
{
	/* do nothing if grace period already ended */
	if (nn->grace_ended)
		return;

	nn->grace_ended = true;
	/*
	 * If the server goes down again right now, an NFSv4
	 * client will still be allowed to reclaim after it comes back up,
	 * even if it hasn't yet had a chance to reclaim state this time.
	 */
	nfsd4_record_grace_done(nn);
	/*
	 * At this point, NFSv4 clients can still reclaim.  But if the
	 * server crashes, any that have not yet reclaimed will be out
	 * of luck on the next boot.
	 *
	 * (NFSv4.1+ clients are considered to have reclaimed once they
	 * call RECLAIM_COMPLETE.  NFSv4.0 clients are considered to
	 * have reclaimed after their first OPEN.)
	 */
	locks_end_grace(&nn->nfsd4_manager);
	/*
	 * At this point, and once lockd and/or any other containers
	 * exit their grace period, further reclaims will fail and
	 * regular locking can resume.
	 */
}
/*
 * If we've waited a lease period but there are still clients trying to
 * reclaim, wait a little longer to give them a chance to finish.
 */
static bool clients_still_reclaiming(struct nfsd_net *nn)
{
	unsigned long now = get_seconds();
	unsigned long double_grace_period_end = nn->boot_time +
						2 * nn->nfsd4_lease;

	if (nn->track_reclaim_completes &&
			atomic_read(&nn->nr_reclaim_complete) ==
			nn->reclaim_str_hashtbl_size)
		return false;
	if (!nn->somebody_reclaimed)
		return false;
	nn->somebody_reclaimed = false;
	/*
	 * If we've given them *two* lease times to reclaim, and they're
	 * still not done, give up:
	 */
	if (time_after(now, double_grace_period_end))
		return false;
	return true;
}
4784 nfs4_laundromat(struct nfsd_net
*nn
)
4786 struct nfs4_client
*clp
;
4787 struct nfs4_openowner
*oo
;
4788 struct nfs4_delegation
*dp
;
4789 struct nfs4_ol_stateid
*stp
;
4790 struct nfsd4_blocked_lock
*nbl
;
4791 struct list_head
*pos
, *next
, reaplist
;
4792 time_t cutoff
= get_seconds() - nn
->nfsd4_lease
;
4793 time_t t
, new_timeo
= nn
->nfsd4_lease
;
4795 dprintk("NFSD: laundromat service - starting\n");
4797 if (clients_still_reclaiming(nn
)) {
4801 dprintk("NFSD: end of grace period\n");
4802 nfsd4_end_grace(nn
);
4803 INIT_LIST_HEAD(&reaplist
);
4804 spin_lock(&nn
->client_lock
);
4805 list_for_each_safe(pos
, next
, &nn
->client_lru
) {
4806 clp
= list_entry(pos
, struct nfs4_client
, cl_lru
);
4807 if (time_after((unsigned long)clp
->cl_time
, (unsigned long)cutoff
)) {
4808 t
= clp
->cl_time
- cutoff
;
4809 new_timeo
= min(new_timeo
, t
);
4812 if (mark_client_expired_locked(clp
)) {
4813 dprintk("NFSD: client in use (clientid %08x)\n",
4814 clp
->cl_clientid
.cl_id
);
4817 list_add(&clp
->cl_lru
, &reaplist
);
4819 spin_unlock(&nn
->client_lock
);
4820 list_for_each_safe(pos
, next
, &reaplist
) {
4821 clp
= list_entry(pos
, struct nfs4_client
, cl_lru
);
4822 dprintk("NFSD: purging unused client (clientid %08x)\n",
4823 clp
->cl_clientid
.cl_id
);
4824 list_del_init(&clp
->cl_lru
);
4827 spin_lock(&state_lock
);
4828 list_for_each_safe(pos
, next
, &nn
->del_recall_lru
) {
4829 dp
= list_entry (pos
, struct nfs4_delegation
, dl_recall_lru
);
4830 if (time_after((unsigned long)dp
->dl_time
, (unsigned long)cutoff
)) {
4831 t
= dp
->dl_time
- cutoff
;
4832 new_timeo
= min(new_timeo
, t
);
4835 WARN_ON(!unhash_delegation_locked(dp
));
4836 list_add(&dp
->dl_recall_lru
, &reaplist
);
4838 spin_unlock(&state_lock
);
4839 while (!list_empty(&reaplist
)) {
4840 dp
= list_first_entry(&reaplist
, struct nfs4_delegation
,
4842 list_del_init(&dp
->dl_recall_lru
);
4843 revoke_delegation(dp
);
4846 spin_lock(&nn
->client_lock
);
4847 while (!list_empty(&nn
->close_lru
)) {
4848 oo
= list_first_entry(&nn
->close_lru
, struct nfs4_openowner
,
4850 if (time_after((unsigned long)oo
->oo_time
,
4851 (unsigned long)cutoff
)) {
4852 t
= oo
->oo_time
- cutoff
;
4853 new_timeo
= min(new_timeo
, t
);
4856 list_del_init(&oo
->oo_close_lru
);
4857 stp
= oo
->oo_last_closed_stid
;
4858 oo
->oo_last_closed_stid
= NULL
;
4859 spin_unlock(&nn
->client_lock
);
4860 nfs4_put_stid(&stp
->st_stid
);
4861 spin_lock(&nn
->client_lock
);
4863 spin_unlock(&nn
->client_lock
);
4866 * It's possible for a client to try and acquire an already held lock
4867 * that is being held for a long time, and then lose interest in it.
4868 * So, we clean out any un-revisited request after a lease period
4869 * under the assumption that the client is no longer interested.
4871 * RFC5661, sec. 9.6 states that the client must not rely on getting
4872 * notifications and must continue to poll for locks, even when the
4873 * server supports them. Thus this shouldn't lead to clients blocking
4874 * indefinitely once the lock does become free.
4876 BUG_ON(!list_empty(&reaplist
));
4877 spin_lock(&nn
->blocked_locks_lock
);
4878 while (!list_empty(&nn
->blocked_locks_lru
)) {
4879 nbl
= list_first_entry(&nn
->blocked_locks_lru
,
4880 struct nfsd4_blocked_lock
, nbl_lru
);
4881 if (time_after((unsigned long)nbl
->nbl_time
,
4882 (unsigned long)cutoff
)) {
4883 t
= nbl
->nbl_time
- cutoff
;
4884 new_timeo
= min(new_timeo
, t
);
4887 list_move(&nbl
->nbl_lru
, &reaplist
);
4888 list_del_init(&nbl
->nbl_list
);
4890 spin_unlock(&nn
->blocked_locks_lock
);
4892 while (!list_empty(&reaplist
)) {
4893 nbl
= list_first_entry(&reaplist
,
4894 struct nfsd4_blocked_lock
, nbl_lru
);
4895 list_del_init(&nbl
->nbl_lru
);
4896 free_blocked_lock(nbl
);
4899 new_timeo
= max_t(time_t, new_timeo
, NFSD_LAUNDROMAT_MINTIMEOUT
);
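/*
 * The laundromat is the periodic cleanup worker: nfs4_laundromat() reaps
 * expired clients, delegations, and lock state, and returns the number of
 * seconds until it should run again.
 */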
static struct workqueue_struct *laundry_wq;
static void laundromat_main(struct work_struct *);

static void
laundromat_main(struct work_struct *laundry)
{
	time_t t;
	struct delayed_work *dwork = to_delayed_work(laundry);
	struct nfsd_net *nn = container_of(dwork, struct nfsd_net,
					   laundromat_work);

	t = nfs4_laundromat(nn);
	dprintk("NFSD: laundromat_main - sleeping for %ld seconds\n", t);
	queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
}
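/* Is the stateid's file the same file that the filehandle refers to? */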
static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp)
{
	if (!fh_match(&fhp->fh_handle, &stp->sc_file->fi_fhandle))
		return nfserr_bad_stateid;
	return nfs_ok;
}

static int
access_permit_read(struct nfs4_ol_stateid *stp)
{
	return test_access(NFS4_SHARE_ACCESS_READ, stp) ||
		test_access(NFS4_SHARE_ACCESS_BOTH, stp) ||
		test_access(NFS4_SHARE_ACCESS_WRITE, stp);
}

static int
access_permit_write(struct nfs4_ol_stateid *stp)
{
	return test_access(NFS4_SHARE_ACCESS_WRITE, stp) ||
		test_access(NFS4_SHARE_ACCESS_BOTH, stp);
}
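/*
 * Check that the open stateid (or, for a lock stateid, its parent open)
 * grants the access mode (read and/or write) that the operation needs.
 */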
static
__be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
{
	__be32 status = nfserr_openmode;

	/* For lock stateid's, we test the parent open, not the lock: */
	if (stp->st_openstp)
		stp = stp->st_openstp;
	if ((flags & WR_STATE) && !access_permit_write(stp))
		goto out;
	if ((flags & RD_STATE) && !access_permit_read(stp))
		goto out;
	status = nfs_ok;
out:
	return status;
}
static inline __be32
check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid, int flags)
{
	if (ONE_STATEID(stateid) && (flags & RD_STATE))
		return nfs_ok;
	else if (opens_in_grace(net)) {
		/* Answer in remaining cases depends on existence of
		 * conflicting state; so we must wait out the grace period. */
		return nfserr_grace;
	} else if (flags & WR_STATE)
		return nfs4_share_conflict(current_fh,
				NFS4_SHARE_DENY_WRITE);
	else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */
		return nfs4_share_conflict(current_fh,
				NFS4_SHARE_DENY_READ);
}

/*
 * Allow READ/WRITE during grace period on recovered state only for files
 * that are not able to provide mandatory locking.
 */
static inline int
grace_disallows_io(struct net *net, struct inode *inode)
{
	return opens_in_grace(net) && mandatory_lock(inode);
}
static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
{
	/*
	 * When sessions are used the stateid generation number is ignored
	 * when it is zero.
	 */
	if (has_session && in->si_generation == 0)
		return nfs_ok;

	if (in->si_generation == ref->si_generation)
		return nfs_ok;

	/* If the client sends us a stateid from the future, it's buggy: */
	if (nfsd4_stateid_generation_after(in, ref))
		return nfserr_bad_stateid;
	/*
	 * However, we could see a stateid from the past, even from a
	 * non-buggy client.  For example, if the client sends a lock
	 * while some IO is outstanding, the lock may bump si_generation
	 * while the IO is still in flight.  The client could avoid that
	 * situation by waiting for responses on all the IO requests,
	 * but better performance may result in retrying IO that
	 * receives an old_stateid error if requests are rarely
	 * reordered in flight:
	 */
	return nfserr_old_stateid;
}
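/*
 * Like check_stateid_generation(), but also verifies that the stateid has
 * not already been closed or revoked, all under the stateid's sc_lock.
 */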
static __be32 nfsd4_stid_check_stateid_generation(stateid_t *in, struct nfs4_stid *s, bool has_session)
{
	__be32 ret;

	spin_lock(&s->sc_lock);
	ret = nfsd4_verify_open_stid(s);
	if (ret == nfs_ok)
		ret = check_stateid_generation(in, &s->sc_stateid, has_session);
	spin_unlock(&s->sc_lock);
	return ret;
}
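/*
 * An open stateid is unusable until its openowner has been confirmed
 * (for v4.0, via OPEN_CONFIRM); lock stateids are exempt from the check.
 */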
static __be32 nfsd4_check_openowner_confirmed(struct nfs4_ol_stateid *ols)
{
	if (ols->st_stateowner->so_is_open_owner &&
	    !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
		return nfserr_bad_stateid;
	return nfs_ok;
}
5033 static __be32
nfsd4_validate_stateid(struct nfs4_client
*cl
, stateid_t
*stateid
)
5035 struct nfs4_stid
*s
;
5036 __be32 status
= nfserr_bad_stateid
;
5038 if (ZERO_STATEID(stateid
) || ONE_STATEID(stateid
) ||
5039 CLOSE_STATEID(stateid
))
5041 /* Client debugging aid. */
5042 if (!same_clid(&stateid
->si_opaque
.so_clid
, &cl
->cl_clientid
)) {
5043 char addr_str
[INET6_ADDRSTRLEN
];
5044 rpc_ntop((struct sockaddr
*)&cl
->cl_addr
, addr_str
,
5046 pr_warn_ratelimited("NFSD: client %s testing state ID "
5047 "with incorrect client ID\n", addr_str
);
5050 spin_lock(&cl
->cl_lock
);
5051 s
= find_stateid_locked(cl
, stateid
);
5054 status
= nfsd4_stid_check_stateid_generation(stateid
, s
, 1);
5057 switch (s
->sc_type
) {
5058 case NFS4_DELEG_STID
:
5061 case NFS4_REVOKED_DELEG_STID
:
5062 status
= nfserr_deleg_revoked
;
5064 case NFS4_OPEN_STID
:
5065 case NFS4_LOCK_STID
:
5066 status
= nfsd4_check_openowner_confirmed(openlockstateid(s
));
5069 printk("unknown stateid type %x\n", s
->sc_type
);
5071 case NFS4_CLOSED_STID
:
5072 case NFS4_CLOSED_DELEG_STID
:
5073 status
= nfserr_bad_stateid
;
5076 spin_unlock(&cl
->cl_lock
);
5081 nfsd4_lookup_stateid(struct nfsd4_compound_state
*cstate
,
5082 stateid_t
*stateid
, unsigned char typemask
,
5083 struct nfs4_stid
**s
, struct nfsd_net
*nn
)
5086 bool return_revoked
= false;
5089 * only return revoked delegations if explicitly asked.
5090 * otherwise we report revoked or bad_stateid status.
5092 if (typemask
& NFS4_REVOKED_DELEG_STID
)
5093 return_revoked
= true;
5094 else if (typemask
& NFS4_DELEG_STID
)
5095 typemask
|= NFS4_REVOKED_DELEG_STID
;
5097 if (ZERO_STATEID(stateid
) || ONE_STATEID(stateid
) ||
5098 CLOSE_STATEID(stateid
))
5099 return nfserr_bad_stateid
;
5100 status
= lookup_clientid(&stateid
->si_opaque
.so_clid
, cstate
, nn
);
5101 if (status
== nfserr_stale_clientid
) {
5102 if (cstate
->session
)
5103 return nfserr_bad_stateid
;
5104 return nfserr_stale_stateid
;
5108 *s
= find_stateid_by_type(cstate
->clp
, stateid
, typemask
);
5110 return nfserr_bad_stateid
;
5111 if (((*s
)->sc_type
== NFS4_REVOKED_DELEG_STID
) && !return_revoked
) {
5113 if (cstate
->minorversion
)
5114 return nfserr_deleg_revoked
;
5115 return nfserr_bad_stateid
;
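/*
 * Map a stateid to an open struct file: the delegation's file for a
 * delegation stateid, or a readable/writeable file for open and lock
 * stateids, depending on the requested access.
 */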
static struct file *
nfs4_find_file(struct nfs4_stid *s, int flags)
{
	if (!s->sc_file)
		return NULL;

	switch (s->sc_type) {
	case NFS4_DELEG_STID:
		if (WARN_ON_ONCE(!s->sc_file->fi_deleg_file))
			return NULL;
		return get_file(s->sc_file->fi_deleg_file);
	case NFS4_OPEN_STID:
	case NFS4_LOCK_STID:
		if (flags & RD_STATE)
			return find_readable_file(s->sc_file);
		else
			return find_writeable_file(s->sc_file);
	}

	return NULL;
}

static __be32
nfs4_check_olstateid(struct nfs4_ol_stateid *ols, int flags)
{
	__be32 status;

	status = nfsd4_check_openowner_confirmed(ols);
	if (status)
		return status;
	return nfs4_check_openmode(ols, flags);
}
5155 nfs4_check_file(struct svc_rqst
*rqstp
, struct svc_fh
*fhp
, struct nfs4_stid
*s
,
5156 struct file
**filpp
, bool *tmp_file
, int flags
)
5158 int acc
= (flags
& RD_STATE
) ? NFSD_MAY_READ
: NFSD_MAY_WRITE
;
5162 file
= nfs4_find_file(s
, flags
);
5164 status
= nfsd_permission(rqstp
, fhp
->fh_export
, fhp
->fh_dentry
,
5165 acc
| NFSD_MAY_OWNER_OVERRIDE
);
5173 status
= nfsd_open(rqstp
, fhp
, S_IFREG
, acc
, filpp
);
5185 * Checks for stateid operations
5188 nfs4_preprocess_stateid_op(struct svc_rqst
*rqstp
,
5189 struct nfsd4_compound_state
*cstate
, struct svc_fh
*fhp
,
5190 stateid_t
*stateid
, int flags
, struct file
**filpp
, bool *tmp_file
)
5192 struct inode
*ino
= d_inode(fhp
->fh_dentry
);
5193 struct net
*net
= SVC_NET(rqstp
);
5194 struct nfsd_net
*nn
= net_generic(net
, nfsd_net_id
);
5195 struct nfs4_stid
*s
= NULL
;
5203 if (grace_disallows_io(net
, ino
))
5204 return nfserr_grace
;
5206 if (ZERO_STATEID(stateid
) || ONE_STATEID(stateid
)) {
5207 status
= check_special_stateids(net
, fhp
, stateid
, flags
);
5211 status
= nfsd4_lookup_stateid(cstate
, stateid
,
5212 NFS4_DELEG_STID
|NFS4_OPEN_STID
|NFS4_LOCK_STID
,
5216 status
= nfsd4_stid_check_stateid_generation(stateid
, s
,
5217 nfsd4_has_session(cstate
));
5221 switch (s
->sc_type
) {
5222 case NFS4_DELEG_STID
:
5223 status
= nfs4_check_delegmode(delegstateid(s
), flags
);
5225 case NFS4_OPEN_STID
:
5226 case NFS4_LOCK_STID
:
5227 status
= nfs4_check_olstateid(openlockstateid(s
), flags
);
5230 status
= nfserr_bad_stateid
;
5235 status
= nfs4_check_fh(fhp
, s
);
5238 if (!status
&& filpp
)
5239 status
= nfs4_check_file(rqstp
, fhp
, s
, filpp
, tmp_file
, flags
);
/*
 * Test if the stateid is valid
 */
__be32
nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		   union nfsd4_op_u *u)
{
	struct nfsd4_test_stateid *test_stateid = &u->test_stateid;
	struct nfsd4_test_stateid_id *stateid;
	struct nfs4_client *cl = cstate->session->se_client;

	list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list)
		stateid->ts_id_status =
			nfsd4_validate_stateid(cl, &stateid->ts_id_stateid);

	return nfs_ok;
}
5265 nfsd4_free_lock_stateid(stateid_t
*stateid
, struct nfs4_stid
*s
)
5267 struct nfs4_ol_stateid
*stp
= openlockstateid(s
);
5270 ret
= nfsd4_lock_ol_stateid(stp
);
5274 ret
= check_stateid_generation(stateid
, &s
->sc_stateid
, 1);
5278 ret
= nfserr_locks_held
;
5279 if (check_for_locks(stp
->st_stid
.sc_file
,
5280 lockowner(stp
->st_stateowner
)))
5283 release_lock_stateid(stp
);
5287 mutex_unlock(&stp
->st_mutex
);
5294 nfsd4_free_stateid(struct svc_rqst
*rqstp
, struct nfsd4_compound_state
*cstate
,
5295 union nfsd4_op_u
*u
)
5297 struct nfsd4_free_stateid
*free_stateid
= &u
->free_stateid
;
5298 stateid_t
*stateid
= &free_stateid
->fr_stateid
;
5299 struct nfs4_stid
*s
;
5300 struct nfs4_delegation
*dp
;
5301 struct nfs4_client
*cl
= cstate
->session
->se_client
;
5302 __be32 ret
= nfserr_bad_stateid
;
5304 spin_lock(&cl
->cl_lock
);
5305 s
= find_stateid_locked(cl
, stateid
);
5308 spin_lock(&s
->sc_lock
);
5309 switch (s
->sc_type
) {
5310 case NFS4_DELEG_STID
:
5311 ret
= nfserr_locks_held
;
5313 case NFS4_OPEN_STID
:
5314 ret
= check_stateid_generation(stateid
, &s
->sc_stateid
, 1);
5317 ret
= nfserr_locks_held
;
5319 case NFS4_LOCK_STID
:
5320 spin_unlock(&s
->sc_lock
);
5321 refcount_inc(&s
->sc_count
);
5322 spin_unlock(&cl
->cl_lock
);
5323 ret
= nfsd4_free_lock_stateid(stateid
, s
);
5325 case NFS4_REVOKED_DELEG_STID
:
5326 spin_unlock(&s
->sc_lock
);
5327 dp
= delegstateid(s
);
5328 list_del_init(&dp
->dl_recall_lru
);
5329 spin_unlock(&cl
->cl_lock
);
5333 /* Default falls through and returns nfserr_bad_stateid */
5335 spin_unlock(&s
->sc_lock
);
5337 spin_unlock(&cl
->cl_lock
);
static inline int
setlkflg(int type)
{
	return (type == NFS4_READW_LT || type == NFS4_READ_LT) ?
		RD_STATE : WR_STATE;
}
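/*
 * Common checks for seqid-mutating operations: verify the owner's seqid
 * (v4.0 replay detection), lock the stateid's st_mutex, and check the
 * stateid generation and filehandle.  On any failure the stateid's mutex
 * is not held on return.
 */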
static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp)
{
	struct svc_fh *current_fh = &cstate->current_fh;
	struct nfs4_stateowner *sop = stp->st_stateowner;
	__be32 status;

	status = nfsd4_check_seqid(cstate, sop, seqid);
	if (status)
		return status;
	status = nfsd4_lock_ol_stateid(stp);
	if (status != nfs_ok)
		return status;
	status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
	if (status == nfs_ok)
		status = nfs4_check_fh(current_fh, &stp->st_stid);
	if (status != nfs_ok)
		mutex_unlock(&stp->st_mutex);
	return status;
}
/*
 * Checks for sequence id mutating operations.
 */
static __be32
nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
			 stateid_t *stateid, char typemask,
			 struct nfs4_ol_stateid **stpp,
			 struct nfsd_net *nn)
{
	__be32 status;
	struct nfs4_stid *s;
	struct nfs4_ol_stateid *stp = NULL;

	dprintk("NFSD: %s: seqid=%d stateid = " STATEID_FMT "\n", __func__,
		seqid, STATEID_VAL(stateid));

	*stpp = NULL;
	status = nfsd4_lookup_stateid(cstate, stateid, typemask, &s, nn);
	if (status)
		return status;
	stp = openlockstateid(s);
	nfsd4_cstate_assign_replay(cstate, stp->st_stateowner);

	status = nfs4_seqid_op_checks(cstate, stateid, seqid, stp);
	if (!status)
		*stpp = stp;
	else
		nfs4_put_stid(&stp->st_stid);
	return status;
}
static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
			stateid_t *stateid, struct nfs4_ol_stateid **stpp, struct nfsd_net *nn)
{
	__be32 status;
	struct nfs4_openowner *oo;
	struct nfs4_ol_stateid *stp;

	status = nfs4_preprocess_seqid_op(cstate, seqid, stateid,
						NFS4_OPEN_STID, &stp, nn);
	if (status)
		return status;
	oo = openowner(stp->st_stateowner);
	if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
		mutex_unlock(&stp->st_mutex);
		nfs4_put_stid(&stp->st_stid);
		return nfserr_bad_stateid;
	}
	*stpp = stp;
	return nfs_ok;
}
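/*
 * OPEN_CONFIRM is a v4.0-only operation: the first OPEN by a new openowner
 * must be confirmed before that owner can be used for further
 * seqid-mutating operations.
 */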
__be32
nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		   union nfsd4_op_u *u)
{
	struct nfsd4_open_confirm *oc = &u->open_confirm;
	__be32 status;
	struct nfs4_openowner *oo;
	struct nfs4_ol_stateid *stp;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	dprintk("NFSD: nfsd4_open_confirm on file %pd\n",
			cstate->current_fh.fh_dentry);

	status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
	if (status)
		return status;

	status = nfs4_preprocess_seqid_op(cstate,
					oc->oc_seqid, &oc->oc_req_stateid,
					NFS4_OPEN_STID, &stp, nn);
	if (status)
		goto out;
	oo = openowner(stp->st_stateowner);
	status = nfserr_bad_stateid;
	if (oo->oo_flags & NFS4_OO_CONFIRMED) {
		mutex_unlock(&stp->st_mutex);
		goto put_stateid;
	}
	oo->oo_flags |= NFS4_OO_CONFIRMED;
	nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid);
	mutex_unlock(&stp->st_mutex);
	dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n",
		__func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid));

	nfsd4_client_record_create(oo->oo_owner.so_client);
	status = 0;
put_stateid:
	nfs4_put_stid(&stp->st_stid);
out:
	nfsd4_bump_seqid(cstate, status);
	return status;
}
static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access)
{
	if (!test_access(access, stp))
		return;
	nfs4_file_put_access(stp->st_stid.sc_file, access);
	clear_access(access, stp);
}

static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access)
{
	switch (to_access) {
	case NFS4_SHARE_ACCESS_READ:
		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_WRITE);
		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
		break;
	case NFS4_SHARE_ACCESS_WRITE:
		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_READ);
		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
		break;
	case NFS4_SHARE_ACCESS_BOTH:
		break;
	default:
		WARN_ON_ONCE(1);
	}
}
5491 nfsd4_open_downgrade(struct svc_rqst
*rqstp
,
5492 struct nfsd4_compound_state
*cstate
, union nfsd4_op_u
*u
)
5494 struct nfsd4_open_downgrade
*od
= &u
->open_downgrade
;
5496 struct nfs4_ol_stateid
*stp
;
5497 struct nfsd_net
*nn
= net_generic(SVC_NET(rqstp
), nfsd_net_id
);
5499 dprintk("NFSD: nfsd4_open_downgrade on file %pd\n",
5500 cstate
->current_fh
.fh_dentry
);
5502 /* We don't yet support WANT bits: */
5503 if (od
->od_deleg_want
)
5504 dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__
,
5507 status
= nfs4_preprocess_confirmed_seqid_op(cstate
, od
->od_seqid
,
5508 &od
->od_stateid
, &stp
, nn
);
5511 status
= nfserr_inval
;
5512 if (!test_access(od
->od_share_access
, stp
)) {
5513 dprintk("NFSD: access not a subset of current bitmap: 0x%hhx, input access=%08x\n",
5514 stp
->st_access_bmap
, od
->od_share_access
);
5517 if (!test_deny(od
->od_share_deny
, stp
)) {
5518 dprintk("NFSD: deny not a subset of current bitmap: 0x%hhx, input deny=%08x\n",
5519 stp
->st_deny_bmap
, od
->od_share_deny
);
5522 nfs4_stateid_downgrade(stp
, od
->od_share_access
);
5523 reset_union_bmap_deny(od
->od_share_deny
, stp
);
5524 nfs4_inc_and_copy_stateid(&od
->od_stateid
, &stp
->st_stid
);
5527 mutex_unlock(&stp
->st_mutex
);
5528 nfs4_put_stid(&stp
->st_stid
);
5530 nfsd4_bump_seqid(cstate
, status
);
static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
{
	struct nfs4_client *clp = s->st_stid.sc_client;
	bool unhashed;
	LIST_HEAD(reaplist);

	spin_lock(&clp->cl_lock);
	unhashed = unhash_open_stateid(s, &reaplist);

	if (clp->cl_minorversion) {
		if (unhashed)
			put_ol_stateid_locked(s, &reaplist);
		spin_unlock(&clp->cl_lock);
		free_ol_stateid_reaplist(&reaplist);
	} else {
		spin_unlock(&clp->cl_lock);
		free_ol_stateid_reaplist(&reaplist);
		if (unhashed)
			move_to_close_lru(s, clp->net);
	}
}
/*
 * nfs4_unlock_state() called after encode
 */
__be32
nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	struct nfsd4_close *close = &u->close;
	__be32 status;
	struct nfs4_ol_stateid *stp;
	struct net *net = SVC_NET(rqstp);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	dprintk("NFSD: nfsd4_close on file %pd\n",
			cstate->current_fh.fh_dentry);

	status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid,
					&close->cl_stateid,
					NFS4_OPEN_STID|NFS4_CLOSED_STID,
					&stp, nn);
	nfsd4_bump_seqid(cstate, status);
	if (status)
		goto out;

	stp->st_stid.sc_type = NFS4_CLOSED_STID;

	/*
	 * Technically we don't _really_ have to increment or copy it, since
	 * it should just be gone after this operation and we clobber the
	 * copied value below, but we continue to do so here just to ensure
	 * that racing ops see that there was a state change.
	 */
	nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);

	nfsd4_close_open_stateid(stp);
	mutex_unlock(&stp->st_mutex);

	/* v4.1+ suggests that we send a special stateid in here, since the
	 * clients should just ignore this anyway. Since this is not useful
	 * for v4.0 clients either, we set it to the special close_stateid
	 * universally.
	 *
	 * See RFC5661 section 18.2.4, and RFC7530 section 16.2.5
	 */
	memcpy(&close->cl_stateid, &close_stateid, sizeof(close->cl_stateid));

	/* put reference from nfs4_preprocess_seqid_op */
	nfs4_put_stid(&stp->st_stid);
out:
	return status;
}
__be32
nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		  union nfsd4_op_u *u)
{
	struct nfsd4_delegreturn *dr = &u->delegreturn;
	struct nfs4_delegation *dp;
	stateid_t *stateid = &dr->dr_stateid;
	struct nfs4_stid *s;
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
		return status;

	status = nfsd4_lookup_stateid(cstate, stateid, NFS4_DELEG_STID, &s, nn);
	if (status)
		goto out;
	dp = delegstateid(s);
	status = nfsd4_stid_check_stateid_generation(stateid, &dp->dl_stid, nfsd4_has_session(cstate));
	if (status)
		goto put_stateid;

	destroy_delegation(dp);
put_stateid:
	nfs4_put_stid(&dp->dl_stid);
out:
	return status;
}
static inline u64
end_offset(u64 start, u64 len)
{
	u64 end;

	end = start + len;
	return end >= start ? end: NFS4_MAX_UINT64;
}

/* last octet in a range */
static inline u64
last_byte_offset(u64 start, u64 len)
{
	u64 end;

	WARN_ON_ONCE(!len);
	end = start + len;
	return end > start ? end - 1: NFS4_MAX_UINT64;
}

/*
 * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that
 * we can't properly handle lock requests that go beyond the (2^63 - 1)-th
 * byte, because of sign extension problems.  Since NFSv4 calls for 64-bit
 * locking, this prevents us from being completely protocol-compliant.  The
 * real solution to this problem is to start using unsigned file offsets in
 * the VFS, but this is a very deep change!
 */
static inline void
nfs4_transform_lock_offset(struct file_lock *lock)
{
	if (lock->fl_start < 0)
		lock->fl_start = OFFSET_MAX;
	if (lock->fl_end < 0)
		lock->fl_end = OFFSET_MAX;
}
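/*
 * lock_manager_operations for NFSv4 byte-range locks.  The get/put owner
 * callbacks keep the nfs4_lockowner pinned for as long as the VFS holds a
 * reference to the file_lock, and lm_notify fires when a blocked lock
 * becomes grantable so we can send a CB_NOTIFY_LOCK callback.
 */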
static fl_owner_t
nfsd4_fl_get_owner(fl_owner_t owner)
{
	struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;

	nfs4_get_stateowner(&lo->lo_owner);
	return owner;
}

static void
nfsd4_fl_put_owner(fl_owner_t owner)
{
	struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;

	if (lo)
		nfs4_put_stateowner(&lo->lo_owner);
}

static void
nfsd4_lm_notify(struct file_lock *fl)
{
	struct nfs4_lockowner		*lo = (struct nfs4_lockowner *)fl->fl_owner;
	struct net			*net = lo->lo_owner.so_client->net;
	struct nfsd_net			*nn = net_generic(net, nfsd_net_id);
	struct nfsd4_blocked_lock	*nbl = container_of(fl,
						struct nfsd4_blocked_lock, nbl_lock);
	bool queue = false;

	/* An empty list means that something else is going to be using it */
	spin_lock(&nn->blocked_locks_lock);
	if (!list_empty(&nbl->nbl_list)) {
		list_del_init(&nbl->nbl_list);
		list_del_init(&nbl->nbl_lru);
		queue = true;
	}
	spin_unlock(&nn->blocked_locks_lock);

	if (queue)
		nfsd4_run_cb(&nbl->nbl_cb);
}

static const struct lock_manager_operations nfsd_posix_mng_ops  = {
	.lm_notify = nfsd4_lm_notify,
	.lm_get_owner = nfsd4_fl_get_owner,
	.lm_put_owner = nfsd4_fl_put_owner,
};
5722 nfs4_set_lock_denied(struct file_lock
*fl
, struct nfsd4_lock_denied
*deny
)
5724 struct nfs4_lockowner
*lo
;
5726 if (fl
->fl_lmops
== &nfsd_posix_mng_ops
) {
5727 lo
= (struct nfs4_lockowner
*) fl
->fl_owner
;
5728 deny
->ld_owner
.data
= kmemdup(lo
->lo_owner
.so_owner
.data
,
5729 lo
->lo_owner
.so_owner
.len
, GFP_KERNEL
);
5730 if (!deny
->ld_owner
.data
)
5731 /* We just don't care that much */
5733 deny
->ld_owner
.len
= lo
->lo_owner
.so_owner
.len
;
5734 deny
->ld_clientid
= lo
->lo_owner
.so_client
->cl_clientid
;
5737 deny
->ld_owner
.len
= 0;
5738 deny
->ld_owner
.data
= NULL
;
5739 deny
->ld_clientid
.cl_boot
= 0;
5740 deny
->ld_clientid
.cl_id
= 0;
5742 deny
->ld_start
= fl
->fl_start
;
5743 deny
->ld_length
= NFS4_MAX_UINT64
;
5744 if (fl
->fl_end
!= NFS4_MAX_UINT64
)
5745 deny
->ld_length
= fl
->fl_end
- fl
->fl_start
+ 1;
5746 deny
->ld_type
= NFS4_READ_LT
;
5747 if (fl
->fl_type
!= F_RDLCK
)
5748 deny
->ld_type
= NFS4_WRITE_LT
;
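/*
 * Look up a lockowner by its opaque owner string in the per-client
 * ownerstr hash table; the caller must hold cl_lock.  A reference is
 * taken on the stateowner before it is returned.
 */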
static struct nfs4_lockowner *
find_lockowner_str_locked(struct nfs4_client *clp, struct xdr_netobj *owner)
{
	unsigned int strhashval = ownerstr_hashval(owner);
	struct nfs4_stateowner *so;

	lockdep_assert_held(&clp->cl_lock);

	list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[strhashval],
			    so_strhash) {
		if (so->so_is_open_owner)
			continue;
		if (same_owner_str(so, owner))
			return lockowner(nfs4_get_stateowner(so));
	}
	return NULL;
}

static struct nfs4_lockowner *
find_lockowner_str(struct nfs4_client *clp, struct xdr_netobj *owner)
{
	struct nfs4_lockowner *lo;

	spin_lock(&clp->cl_lock);
	lo = find_lockowner_str_locked(clp, owner);
	spin_unlock(&clp->cl_lock);
	return lo;
}

static void nfs4_unhash_lockowner(struct nfs4_stateowner *sop)
{
	unhash_lockowner_locked(lockowner(sop));
}

static void nfs4_free_lockowner(struct nfs4_stateowner *sop)
{
	struct nfs4_lockowner *lo = lockowner(sop);

	kmem_cache_free(lockowner_slab, lo);
}

static const struct nfs4_stateowner_operations lockowner_ops = {
	.so_unhash =	nfs4_unhash_lockowner,
	.so_free =	nfs4_free_lockowner,
};
5798 * Alloc a lock owner structure.
5799 * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) has
5802 * strhashval = ownerstr_hashval
5804 static struct nfs4_lockowner
*
5805 alloc_init_lock_stateowner(unsigned int strhashval
, struct nfs4_client
*clp
,
5806 struct nfs4_ol_stateid
*open_stp
,
5807 struct nfsd4_lock
*lock
)
5809 struct nfs4_lockowner
*lo
, *ret
;
5811 lo
= alloc_stateowner(lockowner_slab
, &lock
->lk_new_owner
, clp
);
5814 INIT_LIST_HEAD(&lo
->lo_blocked
);
5815 INIT_LIST_HEAD(&lo
->lo_owner
.so_stateids
);
5816 lo
->lo_owner
.so_is_open_owner
= 0;
5817 lo
->lo_owner
.so_seqid
= lock
->lk_new_lock_seqid
;
5818 lo
->lo_owner
.so_ops
= &lockowner_ops
;
5819 spin_lock(&clp
->cl_lock
);
5820 ret
= find_lockowner_str_locked(clp
, &lock
->lk_new_owner
);
5822 list_add(&lo
->lo_owner
.so_strhash
,
5823 &clp
->cl_ownerstr_hashtbl
[strhashval
]);
5826 nfs4_free_stateowner(&lo
->lo_owner
);
5828 spin_unlock(&clp
->cl_lock
);
5832 static struct nfs4_ol_stateid
*
5833 find_lock_stateid(struct nfs4_lockowner
*lo
, struct nfs4_file
*fp
)
5835 struct nfs4_ol_stateid
*lst
;
5836 struct nfs4_client
*clp
= lo
->lo_owner
.so_client
;
5838 lockdep_assert_held(&clp
->cl_lock
);
5840 list_for_each_entry(lst
, &lo
->lo_owner
.so_stateids
, st_perstateowner
) {
5841 if (lst
->st_stid
.sc_type
!= NFS4_LOCK_STID
)
5843 if (lst
->st_stid
.sc_file
== fp
) {
5844 refcount_inc(&lst
->st_stid
.sc_count
);
5851 static struct nfs4_ol_stateid
*
5852 init_lock_stateid(struct nfs4_ol_stateid
*stp
, struct nfs4_lockowner
*lo
,
5853 struct nfs4_file
*fp
, struct inode
*inode
,
5854 struct nfs4_ol_stateid
*open_stp
)
5856 struct nfs4_client
*clp
= lo
->lo_owner
.so_client
;
5857 struct nfs4_ol_stateid
*retstp
;
5859 mutex_init(&stp
->st_mutex
);
5860 mutex_lock_nested(&stp
->st_mutex
, OPEN_STATEID_MUTEX
);
5862 spin_lock(&clp
->cl_lock
);
5863 spin_lock(&fp
->fi_lock
);
5864 retstp
= find_lock_stateid(lo
, fp
);
5868 refcount_inc(&stp
->st_stid
.sc_count
);
5869 stp
->st_stid
.sc_type
= NFS4_LOCK_STID
;
5870 stp
->st_stateowner
= nfs4_get_stateowner(&lo
->lo_owner
);
5872 stp
->st_stid
.sc_file
= fp
;
5873 stp
->st_access_bmap
= 0;
5874 stp
->st_deny_bmap
= open_stp
->st_deny_bmap
;
5875 stp
->st_openstp
= open_stp
;
5876 list_add(&stp
->st_locks
, &open_stp
->st_locks
);
5877 list_add(&stp
->st_perstateowner
, &lo
->lo_owner
.so_stateids
);
5878 list_add(&stp
->st_perfile
, &fp
->fi_stateids
);
5880 spin_unlock(&fp
->fi_lock
);
5881 spin_unlock(&clp
->cl_lock
);
5883 if (nfsd4_lock_ol_stateid(retstp
) != nfs_ok
) {
5884 nfs4_put_stid(&retstp
->st_stid
);
5887 /* To keep mutex tracking happy */
5888 mutex_unlock(&stp
->st_mutex
);
5894 static struct nfs4_ol_stateid
*
5895 find_or_create_lock_stateid(struct nfs4_lockowner
*lo
, struct nfs4_file
*fi
,
5896 struct inode
*inode
, struct nfs4_ol_stateid
*ost
,
5899 struct nfs4_stid
*ns
= NULL
;
5900 struct nfs4_ol_stateid
*lst
;
5901 struct nfs4_openowner
*oo
= openowner(ost
->st_stateowner
);
5902 struct nfs4_client
*clp
= oo
->oo_owner
.so_client
;
5905 spin_lock(&clp
->cl_lock
);
5906 lst
= find_lock_stateid(lo
, fi
);
5907 spin_unlock(&clp
->cl_lock
);
5909 if (nfsd4_lock_ol_stateid(lst
) == nfs_ok
)
5911 nfs4_put_stid(&lst
->st_stid
);
5913 ns
= nfs4_alloc_stid(clp
, stateid_slab
, nfs4_free_lock_stateid
);
5917 lst
= init_lock_stateid(openlockstateid(ns
), lo
, fi
, inode
, ost
);
5918 if (lst
== openlockstateid(ns
))
static int
check_lock_length(u64 offset, u64 length)
{
	return ((length == 0) || ((length != NFS4_MAX_UINT64) &&
		(length > ~offset)));
}
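/*
 * Record the access mode on the lock stateid, taking a corresponding
 * reference on the nfs4_file the first time each mode is used.
 */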
static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access)
{
	struct nfs4_file *fp = lock_stp->st_stid.sc_file;

	lockdep_assert_held(&fp->fi_lock);

	if (test_access(access, lock_stp))
		return;
	__nfs4_file_get_access(fp, access);
	set_access(access, lock_stp);
}
5946 lookup_or_create_lock_state(struct nfsd4_compound_state
*cstate
,
5947 struct nfs4_ol_stateid
*ost
,
5948 struct nfsd4_lock
*lock
,
5949 struct nfs4_ol_stateid
**plst
, bool *new)
5952 struct nfs4_file
*fi
= ost
->st_stid
.sc_file
;
5953 struct nfs4_openowner
*oo
= openowner(ost
->st_stateowner
);
5954 struct nfs4_client
*cl
= oo
->oo_owner
.so_client
;
5955 struct inode
*inode
= d_inode(cstate
->current_fh
.fh_dentry
);
5956 struct nfs4_lockowner
*lo
;
5957 struct nfs4_ol_stateid
*lst
;
5958 unsigned int strhashval
;
5960 lo
= find_lockowner_str(cl
, &lock
->lk_new_owner
);
5962 strhashval
= ownerstr_hashval(&lock
->lk_new_owner
);
5963 lo
= alloc_init_lock_stateowner(strhashval
, cl
, ost
, lock
);
5965 return nfserr_jukebox
;
5967 /* with an existing lockowner, seqids must be the same */
5968 status
= nfserr_bad_seqid
;
5969 if (!cstate
->minorversion
&&
5970 lock
->lk_new_lock_seqid
!= lo
->lo_owner
.so_seqid
)
5974 lst
= find_or_create_lock_stateid(lo
, fi
, inode
, ost
, new);
5976 status
= nfserr_jukebox
;
5983 nfs4_put_stateowner(&lo
->lo_owner
);
5991 nfsd4_lock(struct svc_rqst
*rqstp
, struct nfsd4_compound_state
*cstate
,
5992 union nfsd4_op_u
*u
)
5994 struct nfsd4_lock
*lock
= &u
->lock
;
5995 struct nfs4_openowner
*open_sop
= NULL
;
5996 struct nfs4_lockowner
*lock_sop
= NULL
;
5997 struct nfs4_ol_stateid
*lock_stp
= NULL
;
5998 struct nfs4_ol_stateid
*open_stp
= NULL
;
5999 struct nfs4_file
*fp
;
6000 struct file
*filp
= NULL
;
6001 struct nfsd4_blocked_lock
*nbl
= NULL
;
6002 struct file_lock
*file_lock
= NULL
;
6003 struct file_lock
*conflock
= NULL
;
6008 unsigned char fl_type
;
6009 unsigned int fl_flags
= FL_POSIX
;
6010 struct net
*net
= SVC_NET(rqstp
);
6011 struct nfsd_net
*nn
= net_generic(net
, nfsd_net_id
);
6013 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
6014 (long long) lock
->lk_offset
,
6015 (long long) lock
->lk_length
);
6017 if (check_lock_length(lock
->lk_offset
, lock
->lk_length
))
6018 return nfserr_inval
;
6020 if ((status
= fh_verify(rqstp
, &cstate
->current_fh
,
6021 S_IFREG
, NFSD_MAY_LOCK
))) {
6022 dprintk("NFSD: nfsd4_lock: permission denied!\n");
6026 if (lock
->lk_is_new
) {
6027 if (nfsd4_has_session(cstate
))
6028 /* See rfc 5661 18.10.3: given clientid is ignored: */
6029 memcpy(&lock
->lk_new_clientid
,
6030 &cstate
->session
->se_client
->cl_clientid
,
6031 sizeof(clientid_t
));
6033 status
= nfserr_stale_clientid
;
6034 if (STALE_CLIENTID(&lock
->lk_new_clientid
, nn
))
6037 /* validate and update open stateid and open seqid */
6038 status
= nfs4_preprocess_confirmed_seqid_op(cstate
,
6039 lock
->lk_new_open_seqid
,
6040 &lock
->lk_new_open_stateid
,
6044 mutex_unlock(&open_stp
->st_mutex
);
6045 open_sop
= openowner(open_stp
->st_stateowner
);
6046 status
= nfserr_bad_stateid
;
6047 if (!same_clid(&open_sop
->oo_owner
.so_client
->cl_clientid
,
6048 &lock
->lk_new_clientid
))
6050 status
= lookup_or_create_lock_state(cstate
, open_stp
, lock
,
6053 status
= nfs4_preprocess_seqid_op(cstate
,
6054 lock
->lk_old_lock_seqid
,
6055 &lock
->lk_old_lock_stateid
,
6056 NFS4_LOCK_STID
, &lock_stp
, nn
);
6060 lock_sop
= lockowner(lock_stp
->st_stateowner
);
6062 lkflg
= setlkflg(lock
->lk_type
);
6063 status
= nfs4_check_openmode(lock_stp
, lkflg
);
6067 status
= nfserr_grace
;
6068 if (locks_in_grace(net
) && !lock
->lk_reclaim
)
6070 status
= nfserr_no_grace
;
6071 if (!locks_in_grace(net
) && lock
->lk_reclaim
)
6074 fp
= lock_stp
->st_stid
.sc_file
;
6075 switch (lock
->lk_type
) {
6077 if (nfsd4_has_session(cstate
))
6078 fl_flags
|= FL_SLEEP
;
6081 spin_lock(&fp
->fi_lock
);
6082 filp
= find_readable_file_locked(fp
);
6084 get_lock_access(lock_stp
, NFS4_SHARE_ACCESS_READ
);
6085 spin_unlock(&fp
->fi_lock
);
6088 case NFS4_WRITEW_LT
:
6089 if (nfsd4_has_session(cstate
))
6090 fl_flags
|= FL_SLEEP
;
6093 spin_lock(&fp
->fi_lock
);
6094 filp
= find_writeable_file_locked(fp
);
6096 get_lock_access(lock_stp
, NFS4_SHARE_ACCESS_WRITE
);
6097 spin_unlock(&fp
->fi_lock
);
6101 status
= nfserr_inval
;
6106 status
= nfserr_openmode
;
6110 nbl
= find_or_allocate_block(lock_sop
, &fp
->fi_fhandle
, nn
);
6112 dprintk("NFSD: %s: unable to allocate block!\n", __func__
);
6113 status
= nfserr_jukebox
;
6117 file_lock
= &nbl
->nbl_lock
;
6118 file_lock
->fl_type
= fl_type
;
6119 file_lock
->fl_owner
= (fl_owner_t
)lockowner(nfs4_get_stateowner(&lock_sop
->lo_owner
));
6120 file_lock
->fl_pid
= current
->tgid
;
6121 file_lock
->fl_file
= filp
;
6122 file_lock
->fl_flags
= fl_flags
;
6123 file_lock
->fl_lmops
= &nfsd_posix_mng_ops
;
6124 file_lock
->fl_start
= lock
->lk_offset
;
6125 file_lock
->fl_end
= last_byte_offset(lock
->lk_offset
, lock
->lk_length
);
6126 nfs4_transform_lock_offset(file_lock
);
6128 conflock
= locks_alloc_lock();
6130 dprintk("NFSD: %s: unable to allocate lock!\n", __func__
);
6131 status
= nfserr_jukebox
;
6135 if (fl_flags
& FL_SLEEP
) {
6136 nbl
->nbl_time
= jiffies
;
6137 spin_lock(&nn
->blocked_locks_lock
);
6138 list_add_tail(&nbl
->nbl_list
, &lock_sop
->lo_blocked
);
6139 list_add_tail(&nbl
->nbl_lru
, &nn
->blocked_locks_lru
);
6140 spin_unlock(&nn
->blocked_locks_lock
);
6143 err
= vfs_lock_file(filp
, F_SETLK
, file_lock
, conflock
);
6145 case 0: /* success! */
6146 nfs4_inc_and_copy_stateid(&lock
->lk_resp_stateid
, &lock_stp
->st_stid
);
6148 if (lock
->lk_reclaim
)
6149 nn
->somebody_reclaimed
= true;
6151 case FILE_LOCK_DEFERRED
:
6154 case -EAGAIN
: /* conflock holds conflicting lock */
6155 status
= nfserr_denied
;
6156 dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
6157 nfs4_set_lock_denied(conflock
, &lock
->lk_denied
);
6160 status
= nfserr_deadlock
;
6163 dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n",err
);
6164 status
= nfserrno(err
);
6169 /* dequeue it if we queued it before */
6170 if (fl_flags
& FL_SLEEP
) {
6171 spin_lock(&nn
->blocked_locks_lock
);
6172 list_del_init(&nbl
->nbl_list
);
6173 list_del_init(&nbl
->nbl_lru
);
6174 spin_unlock(&nn
->blocked_locks_lock
);
6176 free_blocked_lock(nbl
);
6181 /* Bump seqid manually if the 4.0 replay owner is openowner */
6182 if (cstate
->replay_owner
&&
6183 cstate
->replay_owner
!= &lock_sop
->lo_owner
&&
6184 seqid_mutating_err(ntohl(status
)))
6185 lock_sop
->lo_owner
.so_seqid
++;
6188 * If this is a new, never-before-used stateid, and we are
6189 * returning an error, then just go ahead and release it.
6192 release_lock_stateid(lock_stp
);
6194 mutex_unlock(&lock_stp
->st_mutex
);
6196 nfs4_put_stid(&lock_stp
->st_stid
);
6199 nfs4_put_stid(&open_stp
->st_stid
);
6200 nfsd4_bump_seqid(cstate
, status
);
6202 locks_free_lock(conflock
);
/*
 * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
 * so we do a temporary open here just to get an open file to pass to
 * vfs_test_lock.  (Arguably perhaps test_lock should be done with an
 * inode operation.)
 */
static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
{
	struct file *file;
	__be32 err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_READ, &file);
	if (!err) {
		err = nfserrno(vfs_test_lock(file, lock));
		fput(file);
	}
	return err;
}
6227 nfsd4_lockt(struct svc_rqst
*rqstp
, struct nfsd4_compound_state
*cstate
,
6228 union nfsd4_op_u
*u
)
6230 struct nfsd4_lockt
*lockt
= &u
->lockt
;
6231 struct file_lock
*file_lock
= NULL
;
6232 struct nfs4_lockowner
*lo
= NULL
;
6234 struct nfsd_net
*nn
= net_generic(SVC_NET(rqstp
), nfsd_net_id
);
6236 if (locks_in_grace(SVC_NET(rqstp
)))
6237 return nfserr_grace
;
6239 if (check_lock_length(lockt
->lt_offset
, lockt
->lt_length
))
6240 return nfserr_inval
;
6242 if (!nfsd4_has_session(cstate
)) {
6243 status
= lookup_clientid(&lockt
->lt_clientid
, cstate
, nn
);
6248 if ((status
= fh_verify(rqstp
, &cstate
->current_fh
, S_IFREG
, 0)))
6251 file_lock
= locks_alloc_lock();
6253 dprintk("NFSD: %s: unable to allocate lock!\n", __func__
);
6254 status
= nfserr_jukebox
;
6258 switch (lockt
->lt_type
) {
6261 file_lock
->fl_type
= F_RDLCK
;
6264 case NFS4_WRITEW_LT
:
6265 file_lock
->fl_type
= F_WRLCK
;
6268 dprintk("NFSD: nfs4_lockt: bad lock type!\n");
6269 status
= nfserr_inval
;
6273 lo
= find_lockowner_str(cstate
->clp
, &lockt
->lt_owner
);
6275 file_lock
->fl_owner
= (fl_owner_t
)lo
;
6276 file_lock
->fl_pid
= current
->tgid
;
6277 file_lock
->fl_flags
= FL_POSIX
;
6279 file_lock
->fl_start
= lockt
->lt_offset
;
6280 file_lock
->fl_end
= last_byte_offset(lockt
->lt_offset
, lockt
->lt_length
);
6282 nfs4_transform_lock_offset(file_lock
);
6284 status
= nfsd_test_lock(rqstp
, &cstate
->current_fh
, file_lock
);
6288 if (file_lock
->fl_type
!= F_UNLCK
) {
6289 status
= nfserr_denied
;
6290 nfs4_set_lock_denied(file_lock
, &lockt
->lt_denied
);
6294 nfs4_put_stateowner(&lo
->lo_owner
);
6296 locks_free_lock(file_lock
);
6301 nfsd4_locku(struct svc_rqst
*rqstp
, struct nfsd4_compound_state
*cstate
,
6302 union nfsd4_op_u
*u
)
6304 struct nfsd4_locku
*locku
= &u
->locku
;
6305 struct nfs4_ol_stateid
*stp
;
6306 struct file
*filp
= NULL
;
6307 struct file_lock
*file_lock
= NULL
;
6310 struct nfsd_net
*nn
= net_generic(SVC_NET(rqstp
), nfsd_net_id
);
6312 dprintk("NFSD: nfsd4_locku: start=%Ld length=%Ld\n",
6313 (long long) locku
->lu_offset
,
6314 (long long) locku
->lu_length
);
6316 if (check_lock_length(locku
->lu_offset
, locku
->lu_length
))
6317 return nfserr_inval
;
6319 status
= nfs4_preprocess_seqid_op(cstate
, locku
->lu_seqid
,
6320 &locku
->lu_stateid
, NFS4_LOCK_STID
,
6324 filp
= find_any_file(stp
->st_stid
.sc_file
);
6326 status
= nfserr_lock_range
;
6329 file_lock
= locks_alloc_lock();
6331 dprintk("NFSD: %s: unable to allocate lock!\n", __func__
);
6332 status
= nfserr_jukebox
;
6336 file_lock
->fl_type
= F_UNLCK
;
6337 file_lock
->fl_owner
= (fl_owner_t
)lockowner(nfs4_get_stateowner(stp
->st_stateowner
));
6338 file_lock
->fl_pid
= current
->tgid
;
6339 file_lock
->fl_file
= filp
;
6340 file_lock
->fl_flags
= FL_POSIX
;
6341 file_lock
->fl_lmops
= &nfsd_posix_mng_ops
;
6342 file_lock
->fl_start
= locku
->lu_offset
;
6344 file_lock
->fl_end
= last_byte_offset(locku
->lu_offset
,
6346 nfs4_transform_lock_offset(file_lock
);
6348 err
= vfs_lock_file(filp
, F_SETLK
, file_lock
, NULL
);
6350 dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n");
6353 nfs4_inc_and_copy_stateid(&locku
->lu_stateid
, &stp
->st_stid
);
6357 mutex_unlock(&stp
->st_mutex
);
6358 nfs4_put_stid(&stp
->st_stid
);
6360 nfsd4_bump_seqid(cstate
, status
);
6362 locks_free_lock(file_lock
);
6366 status
= nfserrno(err
);
/*
 * true:  locks held by lockowner
 * false: no locks held by lockowner
 */
static bool
check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
{
	struct file_lock *fl;
	int status = false;
	struct file *filp = find_any_file(fp);
	struct inode *inode;
	struct file_lock_context *flctx;

	if (!filp) {
		/* Any valid lock stateid should have some sort of access */
		WARN_ON_ONCE(1);
		return status;
	}

	inode = locks_inode(filp);
	flctx = inode->i_flctx;

	if (flctx && !list_empty_careful(&flctx->flc_posix)) {
		spin_lock(&flctx->flc_lock);
		list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
			if (fl->fl_owner == (fl_owner_t)lowner) {
				status = true;
				break;
			}
		}
		spin_unlock(&flctx->flc_lock);
	}
	fput(filp);
	return status;
}
6408 nfsd4_release_lockowner(struct svc_rqst
*rqstp
,
6409 struct nfsd4_compound_state
*cstate
,
6410 union nfsd4_op_u
*u
)
6412 struct nfsd4_release_lockowner
*rlockowner
= &u
->release_lockowner
;
6413 clientid_t
*clid
= &rlockowner
->rl_clientid
;
6414 struct nfs4_stateowner
*sop
;
6415 struct nfs4_lockowner
*lo
= NULL
;
6416 struct nfs4_ol_stateid
*stp
;
6417 struct xdr_netobj
*owner
= &rlockowner
->rl_owner
;
6418 unsigned int hashval
= ownerstr_hashval(owner
);
6420 struct nfsd_net
*nn
= net_generic(SVC_NET(rqstp
), nfsd_net_id
);
6421 struct nfs4_client
*clp
;
6422 LIST_HEAD (reaplist
);
6424 dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
6425 clid
->cl_boot
, clid
->cl_id
);
6427 status
= lookup_clientid(clid
, cstate
, nn
);
6432 /* Find the matching lock stateowner */
6433 spin_lock(&clp
->cl_lock
);
6434 list_for_each_entry(sop
, &clp
->cl_ownerstr_hashtbl
[hashval
],
6437 if (sop
->so_is_open_owner
|| !same_owner_str(sop
, owner
))
6440 /* see if there are still any locks associated with it */
6441 lo
= lockowner(sop
);
6442 list_for_each_entry(stp
, &sop
->so_stateids
, st_perstateowner
) {
6443 if (check_for_locks(stp
->st_stid
.sc_file
, lo
)) {
6444 status
= nfserr_locks_held
;
6445 spin_unlock(&clp
->cl_lock
);
6450 nfs4_get_stateowner(sop
);
6454 spin_unlock(&clp
->cl_lock
);
6458 unhash_lockowner_locked(lo
);
6459 while (!list_empty(&lo
->lo_owner
.so_stateids
)) {
6460 stp
= list_first_entry(&lo
->lo_owner
.so_stateids
,
6461 struct nfs4_ol_stateid
,
6463 WARN_ON(!unhash_lock_stateid(stp
));
6464 put_ol_stateid_locked(stp
, &reaplist
);
6466 spin_unlock(&clp
->cl_lock
);
6467 free_ol_stateid_reaplist(&reaplist
);
6468 remove_blocked_locks(lo
);
6469 nfs4_put_stateowner(&lo
->lo_owner
);
static inline struct nfs4_client_reclaim *
alloc_reclaim(void)
{
	return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL);
}

bool
nfs4_has_reclaimed_state(struct xdr_netobj name, struct nfsd_net *nn)
{
	struct nfs4_client_reclaim *crp;

	crp = nfsd4_find_reclaim_client(name, nn);
	return (crp && crp->cr_clp);
}
6492 * The caller is responsible for freeing name.data if NULL is returned (it
6493 * will be freed in nfs4_remove_reclaim_record in the normal case).
6495 struct nfs4_client_reclaim
*
6496 nfs4_client_to_reclaim(struct xdr_netobj name
, struct nfsd_net
*nn
)
6498 unsigned int strhashval
;
6499 struct nfs4_client_reclaim
*crp
;
6501 dprintk("NFSD nfs4_client_to_reclaim NAME: %.*s\n", name
.len
, name
.data
);
6502 crp
= alloc_reclaim();
6504 strhashval
= clientstr_hashval(name
);
6505 INIT_LIST_HEAD(&crp
->cr_strhash
);
6506 list_add(&crp
->cr_strhash
, &nn
->reclaim_str_hashtbl
[strhashval
]);
6507 crp
->cr_name
.data
= name
.data
;
6508 crp
->cr_name
.len
= name
.len
;
6510 nn
->reclaim_str_hashtbl_size
++;
void
nfs4_remove_reclaim_record(struct nfs4_client_reclaim *crp, struct nfsd_net *nn)
{
	list_del(&crp->cr_strhash);
	kfree(crp->cr_name.data);
	kfree(crp);
	nn->reclaim_str_hashtbl_size--;
}

void
nfs4_release_reclaim(struct nfsd_net *nn)
{
	struct nfs4_client_reclaim *crp = NULL;
	int i;

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		while (!list_empty(&nn->reclaim_str_hashtbl[i])) {
			crp = list_entry(nn->reclaim_str_hashtbl[i].next,
					struct nfs4_client_reclaim, cr_strhash);
			nfs4_remove_reclaim_record(crp, nn);
		}
	}
	WARN_ON_ONCE(nn->reclaim_str_hashtbl_size);
}
/*
 * called from OPEN, CLAIM_PREVIOUS with a new clientid. */
struct nfs4_client_reclaim *
nfsd4_find_reclaim_client(struct xdr_netobj name, struct nfsd_net *nn)
{
	unsigned int strhashval;
	struct nfs4_client_reclaim *crp = NULL;

	dprintk("NFSD: nfs4_find_reclaim_client for name %.*s\n", name.len, name.data);

	strhashval = clientstr_hashval(name);
	list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) {
		if (compare_blob(&crp->cr_name, &name) == 0) {
			return crp;
		}
	}
	return NULL;
}

/*
 * Called from OPEN. Look for clientid in reclaim list.
 */
__be32
nfs4_check_open_reclaim(clientid_t *clid,
		struct nfsd4_compound_state *cstate,
		struct nfsd_net *nn)
{
	__be32 status;

	/* find clientid in conf_id_hashtbl */
	status = lookup_clientid(clid, cstate, nn);
	if (status)
		return nfserr_reclaim_bad;

	if (test_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &cstate->clp->cl_flags))
		return nfserr_no_grace;

	if (nfsd4_client_record_check(cstate->clp))
		return nfserr_reclaim_bad;

	return nfs_ok;
}
#ifdef CONFIG_NFSD_FAULT_INJECTION

static void
put_client(struct nfs4_client *clp)
{
	atomic_dec(&clp->cl_refcount);
}

static struct nfs4_client *
nfsd_find_client(struct sockaddr_storage *addr, size_t addr_size)
{
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
					  nfsd_net_id);

	if (!nfsd_netns_ready(nn))
		return NULL;

	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
		if (memcmp(&clp->cl_addr, addr, addr_size) == 0)
			return clp;
	}
	return NULL;
}

u64
nfsd_inject_print_clients(void)
{
	struct nfs4_client *clp;
	u64 count = 0;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
					  nfsd_net_id);
	char buf[INET6_ADDRSTRLEN];

	if (!nfsd_netns_ready(nn))
		return 0;

	spin_lock(&nn->client_lock);
	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
		rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf));
		pr_info("NFS Client: %s\n", buf);
		++count;
	}
	spin_unlock(&nn->client_lock);

	return count;
}

u64
nfsd_inject_forget_client(struct sockaddr_storage *addr, size_t addr_size)
{
	u64 count = 0;
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
					  nfsd_net_id);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	clp = nfsd_find_client(addr, addr_size);
	if (clp) {
		if (mark_client_expired_locked(clp) == nfs_ok)
			++count;
		else
			clp = NULL;
	}
	spin_unlock(&nn->client_lock);

	if (clp)
		expire_client(clp);

	return count;
}

u64
nfsd_inject_forget_clients(u64 max)
{
	u64 count = 0;
	struct nfs4_client *clp, *next;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) {
		if (mark_client_expired_locked(clp) == nfs_ok) {
			list_add(&clp->cl_lru, &reaplist);
			if (max != 0 && ++count >= max)
				break;
		}
	}
	spin_unlock(&nn->client_lock);

	list_for_each_entry_safe(clp, next, &reaplist, cl_lru)
		expire_client(clp);

	return count;
}

static void nfsd_print_count(struct nfs4_client *clp, unsigned int count,
			     const char *type)
{
	char buf[INET6_ADDRSTRLEN];

	rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf));
	printk(KERN_INFO "NFS Client: %s has %u %s\n", buf, count, type);
}

static void
nfsd_inject_add_lock_to_list(struct nfs4_ol_stateid *lst,
			     struct list_head *collect)
{
	struct nfs4_client *clp = lst->st_stid.sc_client;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
					  nfsd_net_id);

	if (!collect)
		return;

	lockdep_assert_held(&nn->client_lock);
	atomic_inc(&clp->cl_refcount);
	list_add(&lst->st_locks, collect);
}
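/*
 * Walk every lock stateid held by a client: for each open owner, each of
 * its open stateids, and each lock stateid hanging off those opens.  When
 * @func is NULL the walk only counts; otherwise @func (in practice
 * unhash_lock_stateid) is applied and, if it returns true, the stateid is
 * queued on @collect for later reaping outside the locks.
 */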
static u64 nfsd_foreach_client_lock(struct nfs4_client *clp, u64 max,
				    struct list_head *collect,
				    bool (*func)(struct nfs4_ol_stateid *))
{
	struct nfs4_openowner *oop;
	struct nfs4_ol_stateid *stp, *st_next;
	struct nfs4_ol_stateid *lst, *lst_next;
	u64 count = 0;

	spin_lock(&clp->cl_lock);
	list_for_each_entry(oop, &clp->cl_openowners, oo_perclient) {
		list_for_each_entry_safe(stp, st_next,
				&oop->oo_owner.so_stateids, st_perstateowner) {
			list_for_each_entry_safe(lst, lst_next,
					&stp->st_locks, st_locks) {
				if (func) {
					if (func(lst))
						nfsd_inject_add_lock_to_list(lst,
									collect);
				}
				++count;
				/*
				 * Despite the fact that these functions deal
				 * with 64-bit integers for "count", we must
				 * ensure that it doesn't blow up the
				 * clp->cl_refcount. Throw a warning if we
				 * start to approach INT_MAX here.
				 */
				WARN_ON_ONCE(count == (INT_MAX / 2));
				if (count == max)
					goto out;
			}
		}
	}
out:
	spin_unlock(&clp->cl_lock);

	return count;
}

static u64
nfsd_collect_client_locks(struct nfs4_client *clp, struct list_head *collect,
			  u64 max)
{
	return nfsd_foreach_client_lock(clp, max, collect, unhash_lock_stateid);
}

static u64
nfsd_print_client_locks(struct nfs4_client *clp)
{
	u64 count = nfsd_foreach_client_lock(clp, 0, NULL, NULL);

	nfsd_print_count(clp, count, "locked files");
	return count;
}

u64
nfsd_inject_print_locks(void)
{
	struct nfs4_client *clp;
	u64 count = 0;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);

	if (!nfsd_netns_ready(nn))
		return 0;

	spin_lock(&nn->client_lock);
	list_for_each_entry(clp, &nn->client_lru, cl_lru)
		count += nfsd_print_client_locks(clp);
	spin_unlock(&nn->client_lock);

	return count;
}

static void
nfsd_reap_locks(struct list_head *reaplist)
{
	struct nfs4_client *clp;
	struct nfs4_ol_stateid *stp, *next;

	list_for_each_entry_safe(stp, next, reaplist, st_locks) {
		list_del_init(&stp->st_locks);
		clp = stp->st_stid.sc_client;
		nfs4_put_stid(&stp->st_stid);
		put_client(clp);
	}
}
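/*
 * Note the two-phase pattern used throughout this section: stateids are
 * collected onto a private list while the relevant spinlocks are held,
 * and only reaped (references dropped, clients released) afterwards,
 * since tearing a stateid down may sleep or take other locks.
 */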
u64
nfsd_inject_forget_client_locks(struct sockaddr_storage *addr, size_t addr_size)
{
	unsigned int count = 0;
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	clp = nfsd_find_client(addr, addr_size);
	if (clp)
		count = nfsd_collect_client_locks(clp, &reaplist, 0);
	spin_unlock(&nn->client_lock);
	nfsd_reap_locks(&reaplist);
	return count;
}

u64
nfsd_inject_forget_locks(u64 max)
{
	u64 count = 0;
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
		count += nfsd_collect_client_locks(clp, &reaplist, max - count);
		if (max != 0 && count >= max)
			break;
	}
	spin_unlock(&nn->client_lock);
	nfsd_reap_locks(&reaplist);
	return count;
}

static u64
nfsd_foreach_client_openowner(struct nfs4_client *clp, u64 max,
			      struct list_head *collect,
			      void (*func)(struct nfs4_openowner *))
{
	struct nfs4_openowner *oop, *next;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	u64 count = 0;

	lockdep_assert_held(&nn->client_lock);

	spin_lock(&clp->cl_lock);
	list_for_each_entry_safe(oop, next, &clp->cl_openowners, oo_perclient) {
		if (func) {
			func(oop);
			if (collect) {
				atomic_inc(&clp->cl_refcount);
				list_add(&oop->oo_perclient, collect);
			}
		}
		++count;
		/*
		 * Despite the fact that these functions deal with
		 * 64-bit integers for "count", we must ensure that
		 * it doesn't blow up the clp->cl_refcount. Throw a
		 * warning if we start to approach INT_MAX here.
		 */
		WARN_ON_ONCE(count == (INT_MAX / 2));
		if (count == max)
			break;
	}
	spin_unlock(&clp->cl_lock);

	return count;
}
static u64
nfsd_print_client_openowners(struct nfs4_client *clp)
{
	u64 count = nfsd_foreach_client_openowner(clp, 0, NULL, NULL);

	nfsd_print_count(clp, count, "openowners");
	return count;
}

static u64
nfsd_collect_client_openowners(struct nfs4_client *clp,
			       struct list_head *collect, u64 max)
{
	return nfsd_foreach_client_openowner(clp, max, collect,
						unhash_openowner_locked);
}

u64
nfsd_inject_print_openowners(void)
{
	struct nfs4_client *clp;
	u64 count = 0;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);

	if (!nfsd_netns_ready(nn))
		return 0;

	spin_lock(&nn->client_lock);
	list_for_each_entry(clp, &nn->client_lru, cl_lru)
		count += nfsd_print_client_openowners(clp);
	spin_unlock(&nn->client_lock);

	return count;
}

static void
nfsd_reap_openowners(struct list_head *reaplist)
{
	struct nfs4_client *clp;
	struct nfs4_openowner *oop, *next;

	list_for_each_entry_safe(oop, next, reaplist, oo_perclient) {
		list_del_init(&oop->oo_perclient);
		clp = oop->oo_owner.so_client;
		release_openowner(oop);
		put_client(clp);
	}
}

u64
nfsd_inject_forget_client_openowners(struct sockaddr_storage *addr,
				     size_t addr_size)
{
	unsigned int count = 0;
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	clp = nfsd_find_client(addr, addr_size);
	if (clp)
		count = nfsd_collect_client_openowners(clp, &reaplist, 0);
	spin_unlock(&nn->client_lock);
	nfsd_reap_openowners(&reaplist);
	return count;
}

u64
nfsd_inject_forget_openowners(u64 max)
{
	u64 count = 0;
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
		count += nfsd_collect_client_openowners(clp, &reaplist,
							max - count);
		if (max != 0 && count >= max)
			break;
	}
	spin_unlock(&nn->client_lock);
	nfsd_reap_openowners(&reaplist);
	return count;
}
static u64 nfsd_find_all_delegations(struct nfs4_client *clp, u64 max,
				     struct list_head *victims)
{
	struct nfs4_delegation *dp, *next;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	u64 count = 0;

	lockdep_assert_held(&nn->client_lock);

	spin_lock(&state_lock);
	list_for_each_entry_safe(dp, next, &clp->cl_delegations, dl_perclnt) {
		if (victims) {
			/*
			 * It's not safe to mess with delegations that have a
			 * non-zero dl_time. They might have already been broken
			 * and could be processed by the laundromat outside of
			 * the state_lock. Just leave them be.
			 */
			if (dp->dl_time != 0)
				continue;

			atomic_inc(&clp->cl_refcount);
			WARN_ON(!unhash_delegation_locked(dp));
			list_add(&dp->dl_recall_lru, victims);
		}
		++count;
		/*
		 * Despite the fact that these functions deal with
		 * 64-bit integers for "count", we must ensure that
		 * it doesn't blow up the clp->cl_refcount. Throw a
		 * warning if we start to approach INT_MAX here.
		 */
		WARN_ON_ONCE(count == (INT_MAX / 2));
		if (count == max)
			break;
	}
	spin_unlock(&state_lock);
	return count;
}

static u64
nfsd_print_client_delegations(struct nfs4_client *clp)
{
	u64 count = nfsd_find_all_delegations(clp, 0, NULL);

	nfsd_print_count(clp, count, "delegations");
	return count;
}

u64
nfsd_inject_print_delegations(void)
{
	struct nfs4_client *clp;
	u64 count = 0;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);

	if (!nfsd_netns_ready(nn))
		return 0;

	spin_lock(&nn->client_lock);
	list_for_each_entry(clp, &nn->client_lru, cl_lru)
		count += nfsd_print_client_delegations(clp);
	spin_unlock(&nn->client_lock);

	return count;
}

static void
nfsd_forget_delegations(struct list_head *reaplist)
{
	struct nfs4_client *clp;
	struct nfs4_delegation *dp, *next;

	list_for_each_entry_safe(dp, next, reaplist, dl_recall_lru) {
		list_del_init(&dp->dl_recall_lru);
		clp = dp->dl_stid.sc_client;
		revoke_delegation(dp);
		put_client(clp);
	}
}

u64
nfsd_inject_forget_client_delegations(struct sockaddr_storage *addr,
				      size_t addr_size)
{
	u64 count = 0;
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	clp = nfsd_find_client(addr, addr_size);
	if (clp)
		count = nfsd_find_all_delegations(clp, 0, &reaplist);
	spin_unlock(&nn->client_lock);

	nfsd_forget_delegations(&reaplist);
	return count;
}
u64
nfsd_inject_forget_delegations(u64 max)
{
	u64 count = 0;
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
		count += nfsd_find_all_delegations(clp, max - count, &reaplist);
		if (max != 0 && count >= max)
			break;
	}
	spin_unlock(&nn->client_lock);
	nfsd_forget_delegations(&reaplist);
	return count;
}

static void
nfsd_recall_delegations(struct list_head *reaplist)
{
	struct nfs4_client *clp;
	struct nfs4_delegation *dp, *next;

	list_for_each_entry_safe(dp, next, reaplist, dl_recall_lru) {
		list_del_init(&dp->dl_recall_lru);
		clp = dp->dl_stid.sc_client;
		/*
		 * We skipped all entries that had a non-zero dl_time before,
		 * so we can now reset the dl_time back to 0. If a delegation
		 * break comes in now, then it won't make any difference since
		 * we're recalling it either way.
		 */
		spin_lock(&state_lock);
		dp->dl_time = 0;
		spin_unlock(&state_lock);
		nfsd_break_one_deleg(dp);
		put_client(clp);
	}
}

u64
nfsd_inject_recall_client_delegations(struct sockaddr_storage *addr,
				      size_t addr_size)
{
	u64 count = 0;
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	clp = nfsd_find_client(addr, addr_size);
	if (clp)
		count = nfsd_find_all_delegations(clp, 0, &reaplist);
	spin_unlock(&nn->client_lock);

	nfsd_recall_delegations(&reaplist);
	return count;
}

u64
nfsd_inject_recall_delegations(u64 max)
{
	u64 count = 0;
	struct nfs4_client *clp, *next;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) {
		count += nfsd_find_all_delegations(clp, max - count, &reaplist);
		if (max != 0 && ++count >= max)
			break;
	}
	spin_unlock(&nn->client_lock);
	nfsd_recall_delegations(&reaplist);
	return count;
}
#endif /* CONFIG_NFSD_FAULT_INJECTION */
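/*
 * The nfsd_inject_* entry points above are not called from the normal
 * NFSv4 request paths; they are exercised through the fault-injection
 * debugfs files (see fs/nfsd/fault_inject.c), which only exist when
 * CONFIG_NFSD_FAULT_INJECTION is enabled.  For example, writing a count
 * to a file such as /sys/kernel/debug/nfsd/forget_delegations is expected
 * to end up in nfsd_inject_forget_delegations(), with 0 meaning "no
 * limit" as the loops above show.  The exact file names live in
 * fault_inject.c and are assumptions here, not guaranteed by this file.
 */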
/*
 * Since the lifetime of a delegation isn't limited to that of an open, a
 * client may quite reasonably hang on to a delegation as long as it has
 * the inode cached. This becomes an obvious problem the first time a
 * client's inode cache approaches the size of the server's total memory.
 *
 * For now we avoid this problem by imposing a hard limit on the number
 * of delegations, which varies according to the server's memory size.
 */
static void
set_max_delegations(void)
{
	/*
	 * Allow at most 4 delegations per megabyte of RAM.  Quick
	 * estimates suggest that in the worst case (where every delegation
	 * is for a different inode), a delegation could take about 1.5K,
	 * giving a worst case usage of about 0.6% of memory.
	 */
	max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT);
}
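/*
 * Worked example of the shift above: the free buffer pages amount to
 * pages * 2^PAGE_SHIFT bytes, so shifting the page count right by
 * (20 - 2 - PAGE_SHIFT) is the same as dividing the byte count by 2^18
 * (256 KiB), i.e. 4 delegations per MiB.  With 4 KiB pages
 * (PAGE_SHIFT == 12) the shift is 6, so one delegation is allowed per
 * 64 free buffer pages.
 */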
static int nfs4_state_create_net(struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	int i;

	nn->conf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE,
					    sizeof(struct list_head),
					    GFP_KERNEL);
	if (!nn->conf_id_hashtbl)
		goto err;
	nn->unconf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE,
					      sizeof(struct list_head),
					      GFP_KERNEL);
	if (!nn->unconf_id_hashtbl)
		goto err_unconf_id;
	nn->sessionid_hashtbl = kmalloc_array(SESSION_HASH_SIZE,
					      sizeof(struct list_head),
					      GFP_KERNEL);
	if (!nn->sessionid_hashtbl)
		goto err_sessionid;

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]);
		INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]);
	}
	for (i = 0; i < SESSION_HASH_SIZE; i++)
		INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]);
	nn->conf_name_tree = RB_ROOT;
	nn->unconf_name_tree = RB_ROOT;
	nn->boot_time = get_seconds();
	nn->grace_ended = false;
	nn->nfsd4_manager.block_opens = true;
	INIT_LIST_HEAD(&nn->nfsd4_manager.list);
	INIT_LIST_HEAD(&nn->client_lru);
	INIT_LIST_HEAD(&nn->close_lru);
	INIT_LIST_HEAD(&nn->del_recall_lru);
	spin_lock_init(&nn->client_lock);
	spin_lock_init(&nn->s2s_cp_lock);
	idr_init(&nn->s2s_cp_stateids);

	spin_lock_init(&nn->blocked_locks_lock);
	INIT_LIST_HEAD(&nn->blocked_locks_lru);

	INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main);
	get_net(net);

	return 0;

err_sessionid:
	kfree(nn->unconf_id_hashtbl);
err_unconf_id:
	kfree(nn->conf_id_hashtbl);
err:
	return -ENOMEM;
}
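/*
 * The error path above unwinds the kmalloc_array() allocations in reverse
 * order: a failed sessionid_hashtbl allocation frees the unconf table and
 * then falls through to freeing the conf table, so each label only has to
 * release what was successfully allocated before it.
 */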
static void
nfs4_state_destroy_net(struct net *net)
{
	int i;
	struct nfs4_client *clp = NULL;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		while (!list_empty(&nn->conf_id_hashtbl[i])) {
			clp = list_entry(nn->conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
			destroy_client(clp);
		}
	}

	WARN_ON(!list_empty(&nn->blocked_locks_lru));

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		while (!list_empty(&nn->unconf_id_hashtbl[i])) {
			clp = list_entry(nn->unconf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
			destroy_client(clp);
		}
	}

	kfree(nn->sessionid_hashtbl);
	kfree(nn->unconf_id_hashtbl);
	kfree(nn->conf_id_hashtbl);
	put_net(net);
}

int
nfs4_state_start_net(struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	int ret;

	ret = nfs4_state_create_net(net);
	if (ret)
		return ret;
	locks_start_grace(net, &nn->nfsd4_manager);
	nfsd4_client_tracking_init(net);
	if (nn->track_reclaim_completes && nn->reclaim_str_hashtbl_size == 0)
		goto skip_grace;
	printk(KERN_INFO "NFSD: starting %ld-second grace period (net %x)\n",
	       nn->nfsd4_grace, net->ns.inum);
	queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ);
	return 0;

skip_grace:
	printk(KERN_INFO "NFSD: no clients to reclaim, skipping NFSv4 grace period (net %x)\n",
			net->ns.inum);
	queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_lease * HZ);
	nfsd4_end_grace(nn);
	return 0;
}
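/*
 * Either way the laundromat is armed here: after a normal start its first
 * run is deferred until the configured grace period has elapsed, while a
 * skipped grace period (nothing to reclaim) ends grace immediately and
 * schedules the first run one lease period out instead.
 */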
/* initialization to perform when the nfsd service is started: */

int
nfs4_state_start(void)
{
	int ret;

	laundry_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "nfsd4");
	if (laundry_wq == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	ret = nfsd4_create_callback_queue();
	if (ret)
		goto out_free_laundry;

	set_max_delegations();
	return 0;

out_free_laundry:
	destroy_workqueue(laundry_wq);
out:
	return ret;
}

void
nfs4_state_shutdown_net(struct net *net)
{
	struct nfs4_delegation *dp = NULL;
	struct list_head *pos, *next, reaplist;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	cancel_delayed_work_sync(&nn->laundromat_work);
	locks_end_grace(&nn->nfsd4_manager);

	INIT_LIST_HEAD(&reaplist);
	spin_lock(&state_lock);
	list_for_each_safe(pos, next, &nn->del_recall_lru) {
		dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
		WARN_ON(!unhash_delegation_locked(dp));
		list_add(&dp->dl_recall_lru, &reaplist);
	}
	spin_unlock(&state_lock);
	list_for_each_safe(pos, next, &reaplist) {
		dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
		list_del_init(&dp->dl_recall_lru);
		destroy_unhashed_deleg(dp);
	}

	nfsd4_client_tracking_exit(net);
	nfs4_state_destroy_net(net);
}

void
nfs4_state_shutdown(void)
{
	destroy_workqueue(laundry_wq);
	nfsd4_destroy_callback_queue();
}

static void
get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
{
	if (HAS_STATE_ID(cstate, CURRENT_STATE_ID_FLAG) && CURRENT_STATEID(stateid))
		memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t));
}

static void
put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
{
	if (cstate->minorversion) {
		memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t));
		SET_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
	}
}

void
clear_current_stateid(struct nfsd4_compound_state *cstate)
{
	CLEAR_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
}
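/*
 * Illustrative note on the current-stateid machinery above: put_stateid()
 * saves the stateid produced by an op as the compound's "current" stateid,
 * and get_stateid() substitutes that saved value when a later op in the
 * same compound passes the special current stateid.  A sketch of the
 * intended flow, not actual request handling:
 *
 *	OPEN            -> nfsd4_set_openstateid()  saves op_stateid
 *	READ (current)  -> nfsd4_get_readstateid()  rewrites rd_stateid
 *				with the stateid the OPEN just returned
 */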
/*
 * functions to set current state id
 */
void
nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	put_stateid(cstate, &u->open_downgrade.od_stateid);
}

void
nfsd4_set_openstateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	put_stateid(cstate, &u->open.op_stateid);
}

void
nfsd4_set_closestateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	put_stateid(cstate, &u->close.cl_stateid);
}

void
nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	put_stateid(cstate, &u->lock.lk_resp_stateid);
}

/*
 * functions to consume current state id
 */

void
nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->open_downgrade.od_stateid);
}

void
nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->delegreturn.dr_stateid);
}

void
nfsd4_get_freestateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->free_stateid.fr_stateid);
}

void
nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->setattr.sa_stateid);
}

void
nfsd4_get_closestateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->close.cl_stateid);
}

void
nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->locku.lu_stateid);
}

void
nfsd4_get_readstateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->read.rd_stateid);
}

void
nfsd4_get_writestateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->write.wr_stateid);
}