/*
 * linux/fs/nfs/delegation.c
 *
 * Copyright (C) 2004 Trond Myklebust
 *
 * NFS file delegation management
 *
 */
9 #include <linux/completion.h>
10 #include <linux/kthread.h>
11 #include <linux/module.h>
12 #include <linux/sched.h>
13 #include <linux/spinlock.h>
15 #include <linux/nfs4.h>
16 #include <linux/nfs_fs.h>
17 #include <linux/nfs_xdr.h>
20 #include "delegation.h"
/*
 * Release the memory backing a delegation.  Runs from RCU callback
 * context (see nfs_free_delegation_callback), so no locks are taken.
 */
static void nfs_do_free_delegation(struct nfs_delegation *delegation)
{
	kfree(delegation);
}
28 static void nfs_free_delegation_callback(struct rcu_head
*head
)
30 struct nfs_delegation
*delegation
= container_of(head
, struct nfs_delegation
, rcu
);
32 nfs_do_free_delegation(delegation
);
35 static void nfs_free_delegation(struct nfs_delegation
*delegation
)
37 struct rpc_cred
*cred
;
39 cred
= rcu_dereference(delegation
->cred
);
40 rcu_assign_pointer(delegation
->cred
, NULL
);
41 call_rcu(&delegation
->rcu
, nfs_free_delegation_callback
);
46 static int nfs_delegation_claim_locks(struct nfs_open_context
*ctx
, struct nfs4_state
*state
)
48 struct inode
*inode
= state
->inode
;
52 for (fl
= inode
->i_flock
; fl
!= 0; fl
= fl
->fl_next
) {
53 if (!(fl
->fl_flags
& (FL_POSIX
|FL_FLOCK
)))
55 if (nfs_file_open_context(fl
->fl_file
) != ctx
)
57 status
= nfs4_lock_delegation_recall(state
, fl
);
62 printk(KERN_ERR
"%s: unhandled error %d.\n",
63 __FUNCTION__
, status
);
64 case -NFS4ERR_EXPIRED
:
65 /* kill_proc(fl->fl_pid, SIGLOST, 1); */
66 case -NFS4ERR_STALE_CLIENTID
:
67 nfs4_schedule_state_recovery(NFS_SERVER(inode
)->nfs_client
);
76 static void nfs_delegation_claim_opens(struct inode
*inode
, const nfs4_stateid
*stateid
)
78 struct nfs_inode
*nfsi
= NFS_I(inode
);
79 struct nfs_open_context
*ctx
;
80 struct nfs4_state
*state
;
84 spin_lock(&inode
->i_lock
);
85 list_for_each_entry(ctx
, &nfsi
->open_files
, list
) {
89 if (!test_bit(NFS_DELEGATED_STATE
, &state
->flags
))
91 if (memcmp(state
->stateid
.data
, stateid
->data
, sizeof(state
->stateid
.data
)) != 0)
93 get_nfs_open_context(ctx
);
94 spin_unlock(&inode
->i_lock
);
95 err
= nfs4_open_delegation_recall(ctx
, state
, stateid
);
97 err
= nfs_delegation_claim_locks(ctx
, state
);
98 put_nfs_open_context(ctx
);
103 spin_unlock(&inode
->i_lock
);
107 * Set up a delegation on an inode
109 void nfs_inode_reclaim_delegation(struct inode
*inode
, struct rpc_cred
*cred
, struct nfs_openres
*res
)
111 struct nfs_delegation
*delegation
= NFS_I(inode
)->delegation
;
112 struct rpc_cred
*oldcred
;
114 if (delegation
== NULL
)
116 memcpy(delegation
->stateid
.data
, res
->delegation
.data
,
117 sizeof(delegation
->stateid
.data
));
118 delegation
->type
= res
->delegation_type
;
119 delegation
->maxsize
= res
->maxsize
;
120 oldcred
= delegation
->cred
;
121 delegation
->cred
= get_rpccred(cred
);
122 delegation
->flags
&= ~NFS_DELEGATION_NEED_RECLAIM
;
123 NFS_I(inode
)->delegation_state
= delegation
->type
;
125 put_rpccred(oldcred
);
129 * Set up a delegation on an inode
131 int nfs_inode_set_delegation(struct inode
*inode
, struct rpc_cred
*cred
, struct nfs_openres
*res
)
133 struct nfs_client
*clp
= NFS_SERVER(inode
)->nfs_client
;
134 struct nfs_inode
*nfsi
= NFS_I(inode
);
135 struct nfs_delegation
*delegation
;
138 delegation
= kmalloc(sizeof(*delegation
), GFP_KERNEL
);
139 if (delegation
== NULL
)
141 memcpy(delegation
->stateid
.data
, res
->delegation
.data
,
142 sizeof(delegation
->stateid
.data
));
143 delegation
->type
= res
->delegation_type
;
144 delegation
->maxsize
= res
->maxsize
;
145 delegation
->change_attr
= nfsi
->change_attr
;
146 delegation
->cred
= get_rpccred(cred
);
147 delegation
->inode
= inode
;
149 spin_lock(&clp
->cl_lock
);
150 if (rcu_dereference(nfsi
->delegation
) == NULL
) {
151 list_add_rcu(&delegation
->super_list
, &clp
->cl_delegations
);
152 nfsi
->delegation_state
= delegation
->type
;
153 rcu_assign_pointer(nfsi
->delegation
, delegation
);
156 if (memcmp(&delegation
->stateid
, &nfsi
->delegation
->stateid
,
157 sizeof(delegation
->stateid
)) != 0 ||
158 delegation
->type
!= nfsi
->delegation
->type
) {
159 printk("%s: server %u.%u.%u.%u, handed out a duplicate delegation!\n",
160 __FUNCTION__
, NIPQUAD(clp
->cl_addr
.sin_addr
));
165 /* Ensure we revalidate the attributes and page cache! */
166 spin_lock(&inode
->i_lock
);
167 nfsi
->cache_validity
|= NFS_INO_REVAL_FORCED
;
168 spin_unlock(&inode
->i_lock
);
170 spin_unlock(&clp
->cl_lock
);
175 static int nfs_do_return_delegation(struct inode
*inode
, struct nfs_delegation
*delegation
)
179 res
= nfs4_proc_delegreturn(inode
, delegation
->cred
, &delegation
->stateid
);
180 nfs_free_delegation(delegation
);
184 /* Sync all data to disk upon delegation return */
185 static void nfs_msync_inode(struct inode
*inode
)
187 filemap_fdatawrite(inode
->i_mapping
);
189 filemap_fdatawait(inode
->i_mapping
);
193 * Basic procedure for returning a delegation to the server
195 static int __nfs_inode_return_delegation(struct inode
*inode
, struct nfs_delegation
*delegation
)
197 struct nfs_client
*clp
= NFS_SERVER(inode
)->nfs_client
;
198 struct nfs_inode
*nfsi
= NFS_I(inode
);
200 nfs_msync_inode(inode
);
201 down_read(&clp
->cl_sem
);
202 /* Guard against new delegated open calls */
203 down_write(&nfsi
->rwsem
);
204 nfs_delegation_claim_opens(inode
, &delegation
->stateid
);
205 up_write(&nfsi
->rwsem
);
206 up_read(&clp
->cl_sem
);
207 nfs_msync_inode(inode
);
209 return nfs_do_return_delegation(inode
, delegation
);
212 static struct nfs_delegation
*nfs_detach_delegation_locked(struct nfs_inode
*nfsi
, const nfs4_stateid
*stateid
)
214 struct nfs_delegation
*delegation
= rcu_dereference(nfsi
->delegation
);
216 if (delegation
== NULL
)
218 if (stateid
!= NULL
&& memcmp(delegation
->stateid
.data
, stateid
->data
,
219 sizeof(delegation
->stateid
.data
)) != 0)
221 list_del_rcu(&delegation
->super_list
);
222 nfsi
->delegation_state
= 0;
223 rcu_assign_pointer(nfsi
->delegation
, NULL
);
229 int nfs_inode_return_delegation(struct inode
*inode
)
231 struct nfs_client
*clp
= NFS_SERVER(inode
)->nfs_client
;
232 struct nfs_inode
*nfsi
= NFS_I(inode
);
233 struct nfs_delegation
*delegation
;
236 if (rcu_dereference(nfsi
->delegation
) != NULL
) {
237 spin_lock(&clp
->cl_lock
);
238 delegation
= nfs_detach_delegation_locked(nfsi
, NULL
);
239 spin_unlock(&clp
->cl_lock
);
240 if (delegation
!= NULL
)
241 err
= __nfs_inode_return_delegation(inode
, delegation
);
247 * Return all delegations associated to a super block
249 void nfs_return_all_delegations(struct super_block
*sb
)
251 struct nfs_client
*clp
= NFS_SB(sb
)->nfs_client
;
252 struct nfs_delegation
*delegation
;
259 list_for_each_entry_rcu(delegation
, &clp
->cl_delegations
, super_list
) {
260 if (delegation
->inode
->i_sb
!= sb
)
262 inode
= igrab(delegation
->inode
);
265 spin_lock(&clp
->cl_lock
);
266 delegation
= nfs_detach_delegation_locked(NFS_I(inode
), NULL
);
267 spin_unlock(&clp
->cl_lock
);
269 if (delegation
!= NULL
)
270 __nfs_inode_return_delegation(inode
, delegation
);
277 static int nfs_do_expire_all_delegations(void *ptr
)
279 struct nfs_client
*clp
= ptr
;
280 struct nfs_delegation
*delegation
;
283 allow_signal(SIGKILL
);
285 if (test_bit(NFS4CLNT_STATE_RECOVER
, &clp
->cl_state
) != 0)
287 if (test_bit(NFS4CLNT_LEASE_EXPIRED
, &clp
->cl_state
) == 0)
290 list_for_each_entry_rcu(delegation
, &clp
->cl_delegations
, super_list
) {
291 inode
= igrab(delegation
->inode
);
294 spin_lock(&clp
->cl_lock
);
295 delegation
= nfs_detach_delegation_locked(NFS_I(inode
), NULL
);
296 spin_unlock(&clp
->cl_lock
);
299 __nfs_inode_return_delegation(inode
, delegation
);
306 module_put_and_exit(0);
309 void nfs_expire_all_delegations(struct nfs_client
*clp
)
311 struct task_struct
*task
;
313 __module_get(THIS_MODULE
);
314 atomic_inc(&clp
->cl_count
);
315 task
= kthread_run(nfs_do_expire_all_delegations
, clp
,
316 "%u.%u.%u.%u-delegreturn",
317 NIPQUAD(clp
->cl_addr
.sin_addr
));
321 module_put(THIS_MODULE
);
325 * Return all delegations following an NFS4ERR_CB_PATH_DOWN error.
327 void nfs_handle_cb_pathdown(struct nfs_client
*clp
)
329 struct nfs_delegation
*delegation
;
336 list_for_each_entry_rcu(delegation
, &clp
->cl_delegations
, super_list
) {
337 inode
= igrab(delegation
->inode
);
340 spin_lock(&clp
->cl_lock
);
341 delegation
= nfs_detach_delegation_locked(NFS_I(inode
), NULL
);
342 spin_unlock(&clp
->cl_lock
);
344 if (delegation
!= NULL
)
345 __nfs_inode_return_delegation(inode
, delegation
);
352 struct recall_threadargs
{
354 struct nfs_client
*clp
;
355 const nfs4_stateid
*stateid
;
357 struct completion started
;
361 static int recall_thread(void *data
)
363 struct recall_threadargs
*args
= (struct recall_threadargs
*)data
;
364 struct inode
*inode
= igrab(args
->inode
);
365 struct nfs_client
*clp
= NFS_SERVER(inode
)->nfs_client
;
366 struct nfs_inode
*nfsi
= NFS_I(inode
);
367 struct nfs_delegation
*delegation
;
369 daemonize("nfsv4-delegreturn");
371 nfs_msync_inode(inode
);
372 down_read(&clp
->cl_sem
);
373 down_write(&nfsi
->rwsem
);
374 spin_lock(&clp
->cl_lock
);
375 delegation
= nfs_detach_delegation_locked(nfsi
, args
->stateid
);
376 if (delegation
!= NULL
)
379 args
->result
= -ENOENT
;
380 spin_unlock(&clp
->cl_lock
);
381 complete(&args
->started
);
382 nfs_delegation_claim_opens(inode
, args
->stateid
);
383 up_write(&nfsi
->rwsem
);
384 up_read(&clp
->cl_sem
);
385 nfs_msync_inode(inode
);
387 if (delegation
!= NULL
)
388 nfs_do_return_delegation(inode
, delegation
);
390 module_put_and_exit(0);
394 * Asynchronous delegation recall!
396 int nfs_async_inode_return_delegation(struct inode
*inode
, const nfs4_stateid
*stateid
)
398 struct recall_threadargs data
= {
404 init_completion(&data
.started
);
405 __module_get(THIS_MODULE
);
406 status
= kernel_thread(recall_thread
, &data
, CLONE_KERNEL
);
409 wait_for_completion(&data
.started
);
412 module_put(THIS_MODULE
);
417 * Retrieve the inode associated with a delegation
419 struct inode
*nfs_delegation_find_inode(struct nfs_client
*clp
, const struct nfs_fh
*fhandle
)
421 struct nfs_delegation
*delegation
;
422 struct inode
*res
= NULL
;
424 list_for_each_entry_rcu(delegation
, &clp
->cl_delegations
, super_list
) {
425 if (nfs_compare_fh(fhandle
, &NFS_I(delegation
->inode
)->fh
) == 0) {
426 res
= igrab(delegation
->inode
);
435 * Mark all delegations as needing to be reclaimed
437 void nfs_delegation_mark_reclaim(struct nfs_client
*clp
)
439 struct nfs_delegation
*delegation
;
441 list_for_each_entry_rcu(delegation
, &clp
->cl_delegations
, super_list
)
442 delegation
->flags
|= NFS_DELEGATION_NEED_RECLAIM
;
447 * Reap all unclaimed delegations after reboot recovery is done
449 void nfs_delegation_reap_unclaimed(struct nfs_client
*clp
)
451 struct nfs_delegation
*delegation
;
454 list_for_each_entry_rcu(delegation
, &clp
->cl_delegations
, super_list
) {
455 if ((delegation
->flags
& NFS_DELEGATION_NEED_RECLAIM
) == 0)
457 spin_lock(&clp
->cl_lock
);
458 delegation
= nfs_detach_delegation_locked(NFS_I(delegation
->inode
), NULL
);
459 spin_unlock(&clp
->cl_lock
);
461 if (delegation
!= NULL
)
462 nfs_free_delegation(delegation
);
468 int nfs4_copy_delegation_stateid(nfs4_stateid
*dst
, struct inode
*inode
)
470 struct nfs_inode
*nfsi
= NFS_I(inode
);
471 struct nfs_delegation
*delegation
;
475 delegation
= rcu_dereference(nfsi
->delegation
);
476 if (delegation
!= NULL
) {
477 memcpy(dst
->data
, delegation
->stateid
.data
, sizeof(dst
->data
));