/*
 * Client-side XDR for NFSv4.
 *
 * Copyright (c) 2002 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Kendrick Smith <kmsmith@umich.edu>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Implementation of the NFSv4 state model.  For the time being,
 * this is minimal, but will be made much more complex in a
 * subsequent patch.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_idmap.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>

#include "nfs4_fs.h"
#include "callback.h"
#include "delegation.h"
#include "internal.h"

#define OPENOWNER_POOL_SIZE	8
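
/*
 * zero_stateid is the special all-zeros stateid: the reclaim code at
 * the end of this file reverts an open to it when its state cannot be
 * recovered from the server.
 */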
const nfs4_stateid zero_stateid;

static LIST_HEAD(nfs4_clientid_list);
static int nfs4_init_client(struct nfs_client *clp, struct rpc_cred *cred)
{
	int status = nfs4_proc_setclientid(clp, NFS4_CALLBACK,
			nfs_callback_tcpport, cred);
	if (status == 0)
		status = nfs4_proc_setclientid_confirm(clp, cred);
	if (status == 0)
		nfs4_schedule_state_renewal(clp);
	return status;
}
static struct rpc_cred *nfs4_get_machine_cred(struct nfs_client *clp)
{
	struct rpc_cred *cred = NULL;

	spin_lock(&clp->cl_lock);
	if (clp->cl_machine_cred != NULL)
		cred = get_rpccred(clp->cl_machine_cred);
	spin_unlock(&clp->cl_lock);
	return cred;
}
static void nfs4_clear_machine_cred(struct nfs_client *clp)
{
	struct rpc_cred *cred;

	spin_lock(&clp->cl_lock);
	cred = clp->cl_machine_cred;
	clp->cl_machine_cred = NULL;
	spin_unlock(&clp->cl_lock);
	if (cred != NULL)
		put_rpccred(cred);
}
struct rpc_cred *nfs4_get_renew_cred(struct nfs_client *clp)
{
	struct nfs4_state_owner *sp;
	struct rb_node *pos;
	struct rpc_cred *cred = NULL;

	for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
		sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
		if (list_empty(&sp->so_states))
			continue;
		cred = get_rpccred(sp->so_cred);
		break;
	}
	return cred;
}
static struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp)
{
	struct nfs4_state_owner *sp;
	struct rb_node *pos;
	struct rpc_cred *cred;

	cred = nfs4_get_machine_cred(clp);
	if (cred != NULL)
		goto out;
	pos = rb_first(&clp->cl_state_owners);
	if (pos != NULL) {
		sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
		cred = get_rpccred(sp->so_cred);
	}
out:
	return cred;
}
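
/*
 * nfs_alloc_unique_id() hands out the open-owner and lock-owner
 * identifiers: it picks a random starting id of at most maxbits bits
 * (and no smaller than minval), keeps the ids currently in use in an
 * rb-tree, and on a collision probes forward, wrapping back to minval,
 * until an unused id is found.
 */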
static void nfs_alloc_unique_id(struct rb_root *root, struct nfs_unique_id *new,
		__u64 minval, int maxbits)
{
	struct rb_node **p, *parent;
	struct nfs_unique_id *pos;
	__u64 mask = ~0ULL;

	if (maxbits < 64)
		mask = (1ULL << maxbits) - 1ULL;

	/* Ensure distribution is more or less flat */
	get_random_bytes(&new->id, sizeof(new->id));
	new->id &= mask;
	if (new->id < minval)
		new->id += minval;
retry:
	p = &root->rb_node;
	parent = NULL;

	while (*p != NULL) {
		parent = *p;
		pos = rb_entry(parent, struct nfs_unique_id, rb_node);

		if (new->id < pos->id)
			p = &parent->rb_left;
		else if (new->id > pos->id)
			p = &parent->rb_right;
		else
			goto id_exists;
	}
	rb_link_node(&new->rb_node, parent, p);
	rb_insert_color(&new->rb_node, root);
	return;
id_exists:
	for (;;) {
		new->id++;
		if (new->id < minval || (new->id & mask) != new->id) {
			new->id = minval;
			break;
		}
		parent = rb_next(parent);
		if (parent == NULL)
			break;
		pos = rb_entry(parent, struct nfs_unique_id, rb_node);
		if (new->id < pos->id)
			break;
	}
	goto retry;
}
static void nfs_free_unique_id(struct rb_root *root, struct nfs_unique_id *id)
{
	rb_erase(&id->rb_node, root);
}
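
/*
 * Open state owners are kept in the per-client cl_state_owners rb-tree,
 * ordered by comparing the nfs_server pointer first and the rpc_cred
 * pointer second.  A successful lookup takes a reference by bumping
 * so_count.
 */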
static struct nfs4_state_owner *
nfs4_find_state_owner(struct nfs_server *server, struct rpc_cred *cred)
{
	struct nfs_client *clp = server->nfs_client;
	struct rb_node **p = &clp->cl_state_owners.rb_node,
			*parent = NULL;
	struct nfs4_state_owner *sp, *res = NULL;

	while (*p != NULL) {
		parent = *p;
		sp = rb_entry(parent, struct nfs4_state_owner, so_client_node);

		if (server < sp->so_server) {
			p = &parent->rb_left;
			continue;
		}
		if (server > sp->so_server) {
			p = &parent->rb_right;
			continue;
		}
		if (cred < sp->so_cred)
			p = &parent->rb_left;
		else if (cred > sp->so_cred)
			p = &parent->rb_right;
		else {
			atomic_inc(&sp->so_count);
			res = sp;
			break;
		}
	}
	return res;
}
static struct nfs4_state_owner *
nfs4_insert_state_owner(struct nfs_client *clp, struct nfs4_state_owner *new)
{
	struct rb_node **p = &clp->cl_state_owners.rb_node,
			*parent = NULL;
	struct nfs4_state_owner *sp;

	while (*p != NULL) {
		parent = *p;
		sp = rb_entry(parent, struct nfs4_state_owner, so_client_node);

		if (new->so_server < sp->so_server) {
			p = &parent->rb_left;
			continue;
		}
		if (new->so_server > sp->so_server) {
			p = &parent->rb_right;
			continue;
		}
		if (new->so_cred < sp->so_cred)
			p = &parent->rb_left;
		else if (new->so_cred > sp->so_cred)
			p = &parent->rb_right;
		else {
			atomic_inc(&sp->so_count);
			return sp;
		}
	}
	nfs_alloc_unique_id(&clp->cl_openowner_id, &new->so_owner_id, 1, 64);
	rb_link_node(&new->so_client_node, parent, p);
	rb_insert_color(&new->so_client_node, &clp->cl_state_owners);
	return new;
}
static void
nfs4_remove_state_owner(struct nfs_client *clp, struct nfs4_state_owner *sp)
{
	if (!RB_EMPTY_NODE(&sp->so_client_node))
		rb_erase(&sp->so_client_node, &clp->cl_state_owners);
	nfs_free_unique_id(&clp->cl_openowner_id, &sp->so_owner_id);
}
/*
 * nfs4_alloc_state_owner(): this is called on the OPEN or CREATE path to
 * create a new state_owner.
 */
static struct nfs4_state_owner *
nfs4_alloc_state_owner(void)
{
	struct nfs4_state_owner *sp;

	sp = kzalloc(sizeof(*sp), GFP_KERNEL);
	if (!sp)
		return NULL;
	spin_lock_init(&sp->so_lock);
	INIT_LIST_HEAD(&sp->so_states);
	INIT_LIST_HEAD(&sp->so_delegations);
	rpc_init_wait_queue(&sp->so_sequence.wait, "Seqid_waitqueue");
	sp->so_seqid.sequence = &sp->so_sequence;
	spin_lock_init(&sp->so_sequence.lock);
	INIT_LIST_HEAD(&sp->so_sequence.list);
	atomic_set(&sp->so_count, 1);
	return sp;
}
void
nfs4_drop_state_owner(struct nfs4_state_owner *sp)
{
	if (!RB_EMPTY_NODE(&sp->so_client_node)) {
		struct nfs_client *clp = sp->so_client;

		spin_lock(&clp->cl_lock);
		rb_erase(&sp->so_client_node, &clp->cl_state_owners);
		RB_CLEAR_NODE(&sp->so_client_node);
		spin_unlock(&clp->cl_lock);
	}
}
/*
 * Note: must be called with clp->cl_sem held in order to prevent races
 *       with reboot recovery!
 */
struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct rpc_cred *cred)
{
	struct nfs_client *clp = server->nfs_client;
	struct nfs4_state_owner *sp, *new;

	spin_lock(&clp->cl_lock);
	sp = nfs4_find_state_owner(server, cred);
	spin_unlock(&clp->cl_lock);
	if (sp != NULL)
		return sp;
	new = nfs4_alloc_state_owner();
	if (new == NULL)
		return NULL;
	new->so_client = clp;
	new->so_server = server;
	new->so_cred = cred;
	spin_lock(&clp->cl_lock);
	sp = nfs4_insert_state_owner(clp, new);
	spin_unlock(&clp->cl_lock);
	if (sp == new)
		get_rpccred(cred);
	else {
		rpc_destroy_wait_queue(&new->so_sequence.wait);
		kfree(new);
	}
	return sp;
}
/*
 * Must be called with clp->cl_sem held in order to avoid races
 * with state recovery...
 */
void nfs4_put_state_owner(struct nfs4_state_owner *sp)
{
	struct nfs_client *clp = sp->so_client;
	struct rpc_cred *cred = sp->so_cred;

	if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock))
		return;
	nfs4_remove_state_owner(clp, sp);
	spin_unlock(&clp->cl_lock);
	rpc_destroy_wait_queue(&sp->so_sequence.wait);
	put_rpccred(cred);
	kfree(sp);
}
static struct nfs4_state *
nfs4_alloc_open_state(void)
{
	struct nfs4_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;
	atomic_set(&state->count, 1);
	INIT_LIST_HEAD(&state->lock_states);
	spin_lock_init(&state->state_lock);
	seqlock_init(&state->seqlock);
	return state;
}
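
/*
 * Update the share mode of an open stateid.  The list_move() and
 * list_move_tail() below keep writable opens at the head of the owner's
 * so_states list and read-only opens at the tail, which is what the
 * reclaim code (nfs4_reclaim_open_state()) relies on to recover
 * O_RDWR/O_WRONLY state before any read-only state.
 */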
static void
nfs4_state_set_mode_locked(struct nfs4_state *state, mode_t mode)
{
	if (state->state == mode)
		return;
	/* NB! List reordering - see the reclaim code for why. */
	if ((mode & FMODE_WRITE) != (state->state & FMODE_WRITE)) {
		if (mode & FMODE_WRITE)
			list_move(&state->open_states, &state->owner->so_states);
		else
			list_move_tail(&state->open_states, &state->owner->so_states);
	}
	state->state = mode;
}
static struct nfs4_state *
__nfs4_find_state_byowner(struct inode *inode, struct nfs4_state_owner *owner)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs4_state *state;

	list_for_each_entry(state, &nfsi->open_states, inode_states) {
		if (state->owner != owner)
			continue;
		if (atomic_inc_not_zero(&state->count))
			return state;
	}
	return NULL;
}
static void
nfs4_free_open_state(struct nfs4_state *state)
{
	kfree(state);
}
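
/*
 * Find or create the nfs4_state for this (inode, open owner) pair.  The
 * lookup is repeated under owner->so_lock and inode->i_lock after the
 * unlocked allocation, so if another thread raced in, its state is used
 * and the fresh allocation is freed again.
 */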
struct nfs4_state *
nfs4_get_open_state(struct inode *inode, struct nfs4_state_owner *owner)
{
	struct nfs4_state *state, *new;
	struct nfs_inode *nfsi = NFS_I(inode);

	spin_lock(&inode->i_lock);
	state = __nfs4_find_state_byowner(inode, owner);
	spin_unlock(&inode->i_lock);
	if (state)
		goto out;
	new = nfs4_alloc_open_state();
	spin_lock(&owner->so_lock);
	spin_lock(&inode->i_lock);
	state = __nfs4_find_state_byowner(inode, owner);
	if (state == NULL && new != NULL) {
		state = new;
		state->owner = owner;
		atomic_inc(&owner->so_count);
		list_add(&state->inode_states, &nfsi->open_states);
		state->inode = igrab(inode);
		spin_unlock(&inode->i_lock);
		/* Note: The reclaim code dictates that we add stateless
		 * and read-only stateids to the end of the list */
		list_add_tail(&state->open_states, &owner->so_states);
		spin_unlock(&owner->so_lock);
	} else {
		spin_unlock(&inode->i_lock);
		spin_unlock(&owner->so_lock);
		if (new)
			nfs4_free_open_state(new);
	}
out:
	return state;
}
/*
 * Beware! Caller must be holding exactly one
 * reference to clp->cl_sem!
 */
void nfs4_put_open_state(struct nfs4_state *state)
{
	struct inode *inode = state->inode;
	struct nfs4_state_owner *owner = state->owner;

	if (!atomic_dec_and_lock(&state->count, &owner->so_lock))
		return;
	spin_lock(&inode->i_lock);
	list_del(&state->inode_states);
	list_del(&state->open_states);
	spin_unlock(&inode->i_lock);
	spin_unlock(&owner->so_lock);
	iput(inode);
	nfs4_free_open_state(state);
	nfs4_put_state_owner(owner);
}
/*
 * Close the current file.
 */
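/*
 * The n_rdonly, n_wronly and n_rdwr counters track how many local opens
 * of each mode remain.  When a mode's count drops to zero and one of
 * the NFS_O_*_STATE bits shows the server still holds that mode,
 * call_close forces an over-the-wire CLOSE or downgrade via
 * nfs4_do_close(); otherwise only the local references are dropped.
 */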
static void __nfs4_close(struct path *path, struct nfs4_state *state, mode_t mode, int wait)
{
	struct nfs4_state_owner *owner = state->owner;
	int call_close = 0;
	int newstate;

	atomic_inc(&owner->so_count);
	/* Protect against nfs4_find_state() */
	spin_lock(&owner->so_lock);
	switch (mode & (FMODE_READ | FMODE_WRITE)) {
		case FMODE_READ:
			state->n_rdonly--;
			break;
		case FMODE_WRITE:
			state->n_wronly--;
			break;
		case FMODE_READ|FMODE_WRITE:
			state->n_rdwr--;
	}
	newstate = FMODE_READ|FMODE_WRITE;
	if (state->n_rdwr == 0) {
		if (state->n_rdonly == 0) {
			newstate &= ~FMODE_READ;
			call_close |= test_bit(NFS_O_RDONLY_STATE, &state->flags);
			call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
		}
		if (state->n_wronly == 0) {
			newstate &= ~FMODE_WRITE;
			call_close |= test_bit(NFS_O_WRONLY_STATE, &state->flags);
			call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
		}
		if (newstate == 0)
			clear_bit(NFS_DELEGATED_STATE, &state->flags);
	}
	nfs4_state_set_mode_locked(state, newstate);
	spin_unlock(&owner->so_lock);

	if (!call_close) {
		nfs4_put_open_state(state);
		nfs4_put_state_owner(owner);
	} else
		nfs4_do_close(path, state, wait);
}
void nfs4_close_state(struct path *path, struct nfs4_state *state, mode_t mode)
{
	__nfs4_close(path, state, mode, 0);
}
void nfs4_close_sync(struct path *path, struct nfs4_state *state, mode_t mode)
{
	__nfs4_close(path, state, mode, 1);
}
/*
 * Search the state->lock_states for an existing lock_owner
 * that is compatible with current->files
 */
static struct nfs4_lock_state *
__nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
{
	struct nfs4_lock_state *pos;
	list_for_each_entry(pos, &state->lock_states, ls_locks) {
		if (pos->ls_owner != fl_owner)
			continue;
		atomic_inc(&pos->ls_count);
		return pos;
	}
	return NULL;
}
/*
 * Return a compatible lock_state. If no initialized lock_state structure
 * exists, return an uninitialized one.
 */
static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
{
	struct nfs4_lock_state *lsp;
	struct nfs_client *clp = state->owner->so_client;

	lsp = kzalloc(sizeof(*lsp), GFP_KERNEL);
	if (lsp == NULL)
		return NULL;
	rpc_init_wait_queue(&lsp->ls_sequence.wait, "lock_seqid_waitqueue");
	spin_lock_init(&lsp->ls_sequence.lock);
	INIT_LIST_HEAD(&lsp->ls_sequence.list);
	lsp->ls_seqid.sequence = &lsp->ls_sequence;
	atomic_set(&lsp->ls_count, 1);
	lsp->ls_owner = fl_owner;
	spin_lock(&clp->cl_lock);
	nfs_alloc_unique_id(&clp->cl_lockowner_id, &lsp->ls_id, 1, 64);
	spin_unlock(&clp->cl_lock);
	INIT_LIST_HEAD(&lsp->ls_locks);
	return lsp;
}
static void nfs4_free_lock_state(struct nfs4_lock_state *lsp)
{
	struct nfs_client *clp = lsp->ls_state->owner->so_client;

	spin_lock(&clp->cl_lock);
	nfs_free_unique_id(&clp->cl_lockowner_id, &lsp->ls_id);
	spin_unlock(&clp->cl_lock);
	rpc_destroy_wait_queue(&lsp->ls_sequence.wait);
	kfree(lsp);
}
/*
 * Return a compatible lock_state. If no initialized lock_state structure
 * exists, return an uninitialized one.
 *
 * The caller must be holding clp->cl_sem
 */
static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner)
{
	struct nfs4_lock_state *lsp, *new = NULL;

	for (;;) {
		spin_lock(&state->state_lock);
		lsp = __nfs4_find_lock_state(state, owner);
		if (lsp != NULL)
			break;
		if (new != NULL) {
			new->ls_state = state;
			list_add(&new->ls_locks, &state->lock_states);
			set_bit(LK_STATE_IN_USE, &state->flags);
			lsp = new;
			new = NULL;
			break;
		}
		spin_unlock(&state->state_lock);
		new = nfs4_alloc_lock_state(state, owner);
		if (new == NULL)
			return NULL;
	}
	spin_unlock(&state->state_lock);
	if (new != NULL)
		nfs4_free_lock_state(new);
	return lsp;
}
/*
 * Release reference to lock_state, and free it if we see that
 * it is no longer in use
 */
void nfs4_put_lock_state(struct nfs4_lock_state *lsp)
{
	struct nfs4_state *state;

	if (lsp == NULL)
		return;
	state = lsp->ls_state;
	if (!atomic_dec_and_lock(&lsp->ls_count, &state->state_lock))
		return;
	list_del(&lsp->ls_locks);
	if (list_empty(&state->lock_states))
		clear_bit(LK_STATE_IN_USE, &state->flags);
	spin_unlock(&state->state_lock);
	nfs4_free_lock_state(lsp);
}
static void nfs4_fl_copy_lock(struct file_lock *dst, struct file_lock *src)
{
	struct nfs4_lock_state *lsp = src->fl_u.nfs4_fl.owner;

	dst->fl_u.nfs4_fl.owner = lsp;
	atomic_inc(&lsp->ls_count);
}
static void nfs4_fl_release_lock(struct file_lock *fl)
{
	nfs4_put_lock_state(fl->fl_u.nfs4_fl.owner);
}
static struct file_lock_operations nfs4_fl_lock_ops = {
	.fl_copy_lock = nfs4_fl_copy_lock,
	.fl_release_private = nfs4_fl_release_lock,
};
int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl)
{
	struct nfs4_lock_state *lsp;

	if (fl->fl_ops != NULL)
		return 0;
	lsp = nfs4_get_lock_state(state, fl->fl_owner);
	if (lsp == NULL)
		return -ENOMEM;
	fl->fl_u.nfs4_fl.owner = lsp;
	fl->fl_ops = &nfs4_fl_lock_ops;
	return 0;
}
/*
 * Byte-range lock aware utility to initialize the stateid of read/write
 * requests.
 */
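/*
 * The open stateid is copied under state->seqlock, so a concurrent
 * stateid update simply restarts the memcpy().  If this fl_owner holds
 * byte-range locks and the lock stateid has been initialized, the lock
 * stateid is used instead.
 */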
void nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t fl_owner)
{
	struct nfs4_lock_state *lsp;
	int seq;

	do {
		seq = read_seqbegin(&state->seqlock);
		memcpy(dst, &state->stateid, sizeof(*dst));
	} while (read_seqretry(&state->seqlock, seq));
	if (test_bit(LK_STATE_IN_USE, &state->flags) == 0)
		return;

	spin_lock(&state->state_lock);
	lsp = __nfs4_find_lock_state(state, fl_owner);
	if (lsp != NULL && (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
		memcpy(dst, &lsp->ls_stateid, sizeof(*dst));
	spin_unlock(&state->state_lock);
	nfs4_put_lock_state(lsp);
}
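
/*
 * Sequence-id handling: callers allocate an nfs_seqid and queue it on
 * the owner's rpc_sequence list via nfs_wait_on_sequence().  Only the
 * seqid at the head of that list may proceed; later ones sleep on the
 * wait queue until nfs_free_seqid() removes the head and wakes them.
 * nfs_increment_seqid() then bumps the counter only for replies that
 * actually mutate the sequence id.
 */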
struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter)
{
	struct nfs_seqid *new;

	new = kmalloc(sizeof(*new), GFP_KERNEL);
	if (new != NULL) {
		new->sequence = counter;
		INIT_LIST_HEAD(&new->list);
	}
	return new;
}
void nfs_free_seqid(struct nfs_seqid *seqid)
{
	if (!list_empty(&seqid->list)) {
		struct rpc_sequence *sequence = seqid->sequence->sequence;

		spin_lock(&sequence->lock);
		list_del(&seqid->list);
		spin_unlock(&sequence->lock);
		rpc_wake_up(&sequence->wait);
	}
	kfree(seqid);
}
/*
 * Increment the seqid if the OPEN/OPEN_DOWNGRADE/CLOSE succeeded, or
 * failed with a seqid incrementing error -
 * see comments nfs_fs.h:seqid_mutating_error()
 */
static void nfs_increment_seqid(int status, struct nfs_seqid *seqid)
{
	BUG_ON(list_first_entry(&seqid->sequence->sequence->list, struct nfs_seqid, list) != seqid);
	switch (status) {
		case 0:
			break;
		case -NFS4ERR_BAD_SEQID:
			if (seqid->sequence->flags & NFS_SEQID_CONFIRMED)
				return;
			printk(KERN_WARNING "NFS: v4 server returned a bad"
					" sequence-id error on an"
					" unconfirmed sequence %p!\n",
					seqid->sequence);
		case -NFS4ERR_STALE_CLIENTID:
		case -NFS4ERR_STALE_STATEID:
		case -NFS4ERR_BAD_STATEID:
		case -NFS4ERR_BADXDR:
		case -NFS4ERR_RESOURCE:
		case -NFS4ERR_NOFILEHANDLE:
			/* Non-seqid mutating errors */
			return;
	}
	/*
	 * Note: no locking needed as we are guaranteed to be first
	 * on the sequence list
	 */
	seqid->sequence->counter++;
}
void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid)
{
	if (status == -NFS4ERR_BAD_SEQID) {
		struct nfs4_state_owner *sp = container_of(seqid->sequence,
				struct nfs4_state_owner, so_seqid);
		nfs4_drop_state_owner(sp);
	}
	nfs_increment_seqid(status, seqid);
}
/*
 * Increment the seqid if the LOCK/LOCKU succeeded, or
 * failed with a seqid incrementing error -
 * see comments nfs_fs.h:seqid_mutating_error()
 */
void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid)
{
	nfs_increment_seqid(status, seqid);
}
int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task)
{
	struct rpc_sequence *sequence = seqid->sequence->sequence;
	int status = 0;

	spin_lock(&sequence->lock);
	if (list_empty(&seqid->list))
		list_add_tail(&seqid->list, &sequence->list);
	if (list_first_entry(&sequence->list, struct nfs_seqid, list) == seqid)
		goto unlock;
	rpc_sleep_on(&sequence->wait, task, NULL);
	status = -EAGAIN;
unlock:
	spin_unlock(&sequence->lock);
	return status;
}
static int reclaimer(void *);
static inline void nfs4_clear_recover_bit(struct nfs_client *clp)
{
	smp_mb__before_clear_bit();
	clear_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state);
	smp_mb__after_clear_bit();
	wake_up_bit(&clp->cl_state, NFS4CLNT_STATE_RECOVER);
	rpc_wake_up(&clp->cl_rpcwaitq);
}
/*
 * State recovery routine
 */
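/*
 * Recovery runs in a separate kernel thread, named "%s-reclaim" after
 * the server address.  The NFS4CLNT_STATE_RECOVER bit, set in
 * nfs4_schedule_state_recovery() and cleared by nfs4_clear_recover_bit(),
 * ensures that at most one reclaimer thread runs per nfs_client.
 */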
static void nfs4_recover_state(struct nfs_client *clp)
{
	struct task_struct *task;

	__module_get(THIS_MODULE);
	atomic_inc(&clp->cl_count);
	task = kthread_run(reclaimer, clp, "%s-reclaim",
				rpc_peeraddr2str(clp->cl_rpcclient,
							RPC_DISPLAY_ADDR));
	if (!IS_ERR(task))
		return;
	nfs4_clear_recover_bit(clp);
	nfs_put_client(clp);
	module_put(THIS_MODULE);
}
/*
 * Schedule a state recovery attempt
 */
void nfs4_schedule_state_recovery(struct nfs_client *clp)
{
	if (!clp)
		return;
	if (test_and_set_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state) == 0)
		nfs4_recover_state(clp);
}
static int nfs4_reclaim_locks(struct nfs4_state_recovery_ops *ops, struct nfs4_state *state)
{
	struct inode *inode = state->inode;
	struct file_lock *fl;
	int status = 0;

	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
			continue;
		if (nfs_file_open_context(fl->fl_file)->state != state)
			continue;
		status = ops->recover_lock(state, fl);
		if (status >= 0)
			continue;
		switch (status) {
			default:
				printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
						__FUNCTION__, status);
			case -NFS4ERR_EXPIRED:
			case -NFS4ERR_NO_GRACE:
			case -NFS4ERR_RECLAIM_BAD:
			case -NFS4ERR_RECLAIM_CONFLICT:
				/* kill_proc(fl->fl_pid, SIGLOST, 1); */
				break;
			case -NFS4ERR_STALE_CLIENTID:
				goto out_err;
		}
	}
	return 0;
out_err:
	return status;
}
static int nfs4_reclaim_open_state(struct nfs4_state_recovery_ops *ops, struct nfs4_state_owner *sp)
{
	struct nfs4_state *state;
	struct nfs4_lock_state *lock;
	int status = 0;

	/* Note: we rely on the sp->so_states list being ordered
	 * so that we always reclaim open(O_RDWR) and/or open(O_WRITE)
	 * states first.
	 * This is needed to ensure that the server won't give us any
	 * read delegations that we have to return if, say, we are
	 * recovering after a network partition or a reboot from a
	 * server that doesn't support a grace period.
	 */
	list_for_each_entry(state, &sp->so_states, open_states) {
		if (state->state == 0)
			continue;
		status = ops->recover_open(sp, state);
		if (status >= 0) {
			status = nfs4_reclaim_locks(ops, state);
			if (status < 0)
				goto out_err;
			list_for_each_entry(lock, &state->lock_states, ls_locks) {
				if (!(lock->ls_flags & NFS_LOCK_INITIALIZED))
					printk("%s: Lock reclaim failed!\n",
							__FUNCTION__);
			}
			continue;
		}
		switch (status) {
			default:
				printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
						__FUNCTION__, status);
			case -NFS4ERR_RECLAIM_BAD:
			case -NFS4ERR_RECLAIM_CONFLICT:
				/*
				 * Open state on this file cannot be recovered
				 * All we can do is revert to using the zero stateid.
				 */
				memset(state->stateid.data, 0,
					sizeof(state->stateid.data));
				/* Mark the file as being 'closed' */
				state->state = 0;
				break;
			case -NFS4ERR_EXPIRED:
			case -NFS4ERR_NO_GRACE:
			case -NFS4ERR_STALE_CLIENTID:
				goto out_err;
		}
	}
	return 0;
out_err:
	return status;
}
static void nfs4_state_mark_reclaim(struct nfs_client *clp)
{
	struct nfs4_state_owner *sp;
	struct rb_node *pos;
	struct nfs4_state *state;
	struct nfs4_lock_state *lock;

	/* Reset all sequence ids to zero */
	for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
		sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
		sp->so_seqid.counter = 0;
		sp->so_seqid.flags = 0;
		spin_lock(&sp->so_lock);
		list_for_each_entry(state, &sp->so_states, open_states) {
			clear_bit(NFS_DELEGATED_STATE, &state->flags);
			clear_bit(NFS_O_RDONLY_STATE, &state->flags);
			clear_bit(NFS_O_WRONLY_STATE, &state->flags);
			clear_bit(NFS_O_RDWR_STATE, &state->flags);
			list_for_each_entry(lock, &state->lock_states, ls_locks) {
				lock->ls_seqid.counter = 0;
				lock->ls_seqid.flags = 0;
				lock->ls_flags &= ~NFS_LOCK_INITIALIZED;
			}
		}
		spin_unlock(&sp->so_lock);
	}
}
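
/*
 * The reclaimer thread drives recovery: it first tries to RENEW the
 * existing lease; if the server reports a stale clientid or a moved
 * lease, it switches to reboot recovery, re-establishes the clientid
 * via nfs4_init_client(), marks delegations and per-owner state for
 * reclaim, and then walks every state owner reclaiming opens and locks,
 * falling back to the "network partition" ops when the server grants no
 * grace period.  Unclaimed delegations are reaped at the end.
 */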
static int reclaimer(void *ptr)
{
	struct nfs_client *clp = ptr;
	struct nfs4_state_owner *sp;
	struct rb_node *pos;
	struct nfs4_state_recovery_ops *ops;
	struct rpc_cred *cred;
	int status = 0;

	allow_signal(SIGKILL);

	/* Ensure exclusive access to NFSv4 state */
	down_write(&clp->cl_sem);
	/* Are there any NFS mounts out there? */
	if (list_empty(&clp->cl_superblocks))
		goto out;
restart_loop:
	ops = &nfs4_network_partition_recovery_ops;
	/* Are there any open files on this volume? */
	cred = nfs4_get_renew_cred(clp);
	if (cred != NULL) {
		/* Yes there are: try to renew the old lease */
		status = nfs4_proc_renew(clp, cred);
		put_rpccred(cred);
		switch (status) {
			case 0:
			case -NFS4ERR_CB_PATH_DOWN:
				goto out;
			case -NFS4ERR_STALE_CLIENTID:
			case -NFS4ERR_LEASE_MOVED:
				ops = &nfs4_reboot_recovery_ops;
		}
	} else {
		/* "reboot" to ensure we clear all state on the server */
		clp->cl_boot_time = CURRENT_TIME;
	}

	/* We're going to have to re-establish a clientid */
	nfs4_state_mark_reclaim(clp);
	status = -ENOENT;
	cred = nfs4_get_setclientid_cred(clp);
	if (cred != NULL) {
		status = nfs4_init_client(clp, cred);
		put_rpccred(cred);
		/* Handle case where the user hasn't set up machine creds */
		if (status == -EACCES && cred == clp->cl_machine_cred) {
			nfs4_clear_machine_cred(clp);
			goto restart_loop;
		}
	}
	if (status)
		goto out_error;

	/* Mark all delegations for reclaim */
	nfs_delegation_mark_reclaim(clp);
	/* Note: list is protected by exclusive lock on cl->cl_sem */
	for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
		sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
		status = nfs4_reclaim_open_state(ops, sp);
		if (status < 0) {
			if (status == -NFS4ERR_NO_GRACE) {
				ops = &nfs4_network_partition_recovery_ops;
				status = nfs4_reclaim_open_state(ops, sp);
			}
			if (status == -NFS4ERR_STALE_CLIENTID)
				goto restart_loop;
			if (status == -NFS4ERR_EXPIRED)
				goto restart_loop;
		}
	}
	nfs_delegation_reap_unclaimed(clp);
out:
	up_write(&clp->cl_sem);
	if (status == -NFS4ERR_CB_PATH_DOWN)
		nfs_handle_cb_pathdown(clp);
	nfs4_clear_recover_bit(clp);
	nfs_put_client(clp);
	module_put_and_exit(0);
	return 0;
out_error:
	printk(KERN_WARNING "Error: state recovery failed on NFSv4 server %s"
			" with error %d\n", clp->cl_hostname, -status);
	set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
	goto out;
}