/*
 * linux/fs/nfs/delegation.c
 *
 * Copyright (C) 2004 Trond Myklebust
 *
 * NFS file delegation management
 *
 */
#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_xdr.h>

#include "nfs4_fs.h"
#include "delegation.h"
#include "internal.h"
static void nfs_do_free_delegation(struct nfs_delegation *delegation)
{
	kfree(delegation);
}

static void nfs_free_delegation_callback(struct rcu_head *head)
{
	struct nfs_delegation *delegation = container_of(head, struct nfs_delegation, rcu);

	nfs_do_free_delegation(delegation);
}
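/*
 * Drop the delegation's credential right away, but defer freeing the
 * delegation itself until after an RCU grace period, so that lockless
 * readers still walking the delegation lists never touch freed memory.
 */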
static void nfs_free_delegation(struct nfs_delegation *delegation)
{
	struct rpc_cred *cred;

	cred = rcu_dereference(delegation->cred);
	rcu_assign_pointer(delegation->cred, NULL);
	call_rcu(&delegation->rcu, nfs_free_delegation_callback);
	if (cred)
		put_rpccred(cred);
}
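/*
 * Reclaim the POSIX and flock locks that were taken under the delegation
 * for this open context by issuing real LOCK requests to the server.
 */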
static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_state *state)
{
	struct inode *inode = state->inode;
	struct file_lock *fl;
	int status;

	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
			continue;
		if (nfs_file_open_context(fl->fl_file) != ctx)
			continue;
		status = nfs4_lock_delegation_recall(state, fl);
		if (status >= 0)
			continue;
		switch (status) {
			default:
				printk(KERN_ERR "%s: unhandled error %d.\n",
						__FUNCTION__, status);
			case -NFS4ERR_EXPIRED:
				/* kill_proc(fl->fl_pid, SIGLOST, 1); */
			case -NFS4ERR_STALE_CLIENTID:
				nfs4_schedule_state_recovery(NFS_SERVER(inode)->nfs_client);
				goto out_err;
		}
	}
	return 0;
out_err:
	return status;
}
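/*
 * Walk the open contexts on the inode and convert any open state that is
 * still marked delegated under the given stateid into regular open state
 * on the server, then reclaim the locks that depend on it.
 */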
static void nfs_delegation_claim_opens(struct inode *inode, const nfs4_stateid *stateid)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_open_context *ctx;
	struct nfs4_state *state;
	int err;

again:
	spin_lock(&inode->i_lock);
	list_for_each_entry(ctx, &nfsi->open_files, list) {
		state = ctx->state;
		if (state == NULL)
			continue;
		if (!test_bit(NFS_DELEGATED_STATE, &state->flags))
			continue;
		if (memcmp(state->stateid.data, stateid->data, sizeof(state->stateid.data)) != 0)
			continue;
		get_nfs_open_context(ctx);
		spin_unlock(&inode->i_lock);
		err = nfs4_open_delegation_recall(ctx, state, stateid);
		if (err >= 0)
			err = nfs_delegation_claim_locks(ctx, state);
		put_nfs_open_context(ctx);
		if (err != 0)
			return;
		goto again;
	}
	spin_unlock(&inode->i_lock);
}
/*
 * Update an inode's existing delegation after it has been reclaimed
 * from the server
 */
void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
{
	struct nfs_delegation *delegation = NFS_I(inode)->delegation;
	struct rpc_cred *oldcred;

	if (delegation == NULL)
		return;
	memcpy(delegation->stateid.data, res->delegation.data,
			sizeof(delegation->stateid.data));
	delegation->type = res->delegation_type;
	delegation->maxsize = res->maxsize;
	oldcred = delegation->cred;
	delegation->cred = get_rpccred(cred);
	delegation->flags &= ~NFS_DELEGATION_NEED_RECLAIM;
	NFS_I(inode)->delegation_state = delegation->type;
	smp_wmb();
	put_rpccred(oldcred);
}
static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *delegation, int issync)
{
	int res = 0;

	res = nfs4_proc_delegreturn(inode, delegation->cred, &delegation->stateid, issync);
	nfs_free_delegation(delegation);
	return res;
}
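/*
 * Unhook the delegation from the inode and from the per-client delegation
 * list. The caller is expected to hold clp->cl_lock; when a stateid is
 * supplied, the delegation is only detached if the stateids match.
 */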
static struct nfs_delegation *nfs_detach_delegation_locked(struct nfs_inode *nfsi, const nfs4_stateid *stateid)
{
	struct nfs_delegation *delegation = rcu_dereference(nfsi->delegation);

	if (delegation == NULL)
		goto nomatch;
	if (stateid != NULL && memcmp(delegation->stateid.data, stateid->data,
				sizeof(delegation->stateid.data)) != 0)
		goto nomatch;
	list_del_rcu(&delegation->super_list);
	nfsi->delegation_state = 0;
	rcu_assign_pointer(nfsi->delegation, NULL);
	return delegation;
nomatch:
	return NULL;
}
/*
 * Set up a delegation on an inode
 */
int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
{
	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;
	struct nfs_delegation *freeme = NULL;
	int status = 0;

	delegation = kmalloc(sizeof(*delegation), GFP_KERNEL);
	if (delegation == NULL)
		return -ENOMEM;
	memcpy(delegation->stateid.data, res->delegation.data,
			sizeof(delegation->stateid.data));
	delegation->type = res->delegation_type;
	delegation->maxsize = res->maxsize;
	delegation->change_attr = nfsi->change_attr;
	delegation->cred = get_rpccred(cred);
	delegation->inode = inode;

	spin_lock(&clp->cl_lock);
	if (rcu_dereference(nfsi->delegation) != NULL) {
		if (memcmp(&delegation->stateid, &nfsi->delegation->stateid,
					sizeof(delegation->stateid)) == 0 &&
				delegation->type == nfsi->delegation->type) {
			goto out;
		}
		/*
		 * Deal with broken servers that hand out two
		 * delegations for the same file.
		 */
		dfprintk(FILE, "%s: server %s handed out "
				"a duplicate delegation!\n",
				__FUNCTION__, clp->cl_hostname);
		if (delegation->type <= nfsi->delegation->type) {
			freeme = delegation;
			delegation = NULL;
			goto out;
		}
		freeme = nfs_detach_delegation_locked(nfsi, NULL);
	}
	list_add_rcu(&delegation->super_list, &clp->cl_delegations);
	nfsi->delegation_state = delegation->type;
	rcu_assign_pointer(nfsi->delegation, delegation);
	delegation = NULL;

	/* Ensure we revalidate the attributes and page cache! */
	spin_lock(&inode->i_lock);
	nfsi->cache_validity |= NFS_INO_REVAL_FORCED;
	spin_unlock(&inode->i_lock);

out:
	spin_unlock(&clp->cl_lock);
	if (delegation != NULL)
		nfs_free_delegation(delegation);
	if (freeme != NULL)
		nfs_do_return_delegation(inode, freeme, 0);
	return status;
}
/* Sync all data to disk upon delegation return */
static void nfs_msync_inode(struct inode *inode)
{
	filemap_fdatawrite(inode->i_mapping);
	nfs_wb_all(inode);
	filemap_fdatawait(inode->i_mapping);
}
/*
 * Basic procedure for returning a delegation to the server
 */
static int __nfs_inode_return_delegation(struct inode *inode, struct nfs_delegation *delegation)
{
	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
	struct nfs_inode *nfsi = NFS_I(inode);

	nfs_msync_inode(inode);
	down_read(&clp->cl_sem);
	/* Guard against new delegated open calls */
	down_write(&nfsi->rwsem);
	nfs_delegation_claim_opens(inode, &delegation->stateid);
	up_write(&nfsi->rwsem);
	up_read(&clp->cl_sem);
	nfs_msync_inode(inode);

	return nfs_do_return_delegation(inode, delegation, 1);
}
/*
 * This function returns the delegation without reclaiming opens
 * or protecting against delegation reclaims.
 * It is therefore really only safe to be called from
 * nfs4_clear_inode()
 */
void nfs_inode_return_delegation_noreclaim(struct inode *inode)
{
	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;

	if (rcu_dereference(nfsi->delegation) != NULL) {
		spin_lock(&clp->cl_lock);
		delegation = nfs_detach_delegation_locked(nfsi, NULL);
		spin_unlock(&clp->cl_lock);
		if (delegation != NULL)
			nfs_do_return_delegation(inode, delegation, 0);
	}
}
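/*
 * Return the delegation held on this inode, if any, after first
 * reclaiming the open and lock state that depends on it.
 */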
int nfs_inode_return_delegation(struct inode *inode)
{
	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;
	int err = 0;

	if (rcu_dereference(nfsi->delegation) != NULL) {
		spin_lock(&clp->cl_lock);
		delegation = nfs_detach_delegation_locked(nfsi, NULL);
		spin_unlock(&clp->cl_lock);
		if (delegation != NULL)
			err = __nfs_inode_return_delegation(inode, delegation);
	}
	return err;
}
/*
 * Return all delegations associated with a super block
 */
void nfs_return_all_delegations(struct super_block *sb)
{
	struct nfs_client *clp = NFS_SB(sb)->nfs_client;
	struct nfs_delegation *delegation;
	struct inode *inode;

	if (clp == NULL)
		return;
restart:
	rcu_read_lock();
	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
		if (delegation->inode->i_sb != sb)
			continue;
		inode = igrab(delegation->inode);
		if (inode == NULL)
			continue;
		spin_lock(&clp->cl_lock);
		delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL);
		spin_unlock(&clp->cl_lock);
		rcu_read_unlock();
		if (delegation != NULL)
			__nfs_inode_return_delegation(inode, delegation);
		iput(inode);
		goto restart;
	}
	rcu_read_unlock();
}
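/*
 * Thread body for nfs_expire_all_delegations(): return every delegation
 * held by this client for as long as the lease remains marked expired
 * and no state recovery is in progress.
 */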
static int nfs_do_expire_all_delegations(void *ptr)
{
	struct nfs_client *clp = ptr;
	struct nfs_delegation *delegation;
	struct inode *inode;

	allow_signal(SIGKILL);
restart:
	if (test_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state) != 0)
		goto out;
	if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0)
		goto out;
	rcu_read_lock();
	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
		inode = igrab(delegation->inode);
		if (inode == NULL)
			continue;
		spin_lock(&clp->cl_lock);
		delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL);
		spin_unlock(&clp->cl_lock);
		rcu_read_unlock();
		if (delegation)
			__nfs_inode_return_delegation(inode, delegation);
		iput(inode);
		goto restart;
	}
	rcu_read_unlock();
out:
	nfs_put_client(clp);
	module_put_and_exit(0);
}
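/*
 * Spawn a kernel thread to return all of this client's delegations,
 * presumably because the lease has been deemed expired. If the thread
 * cannot be started, the client and module references are dropped again.
 */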
void nfs_expire_all_delegations(struct nfs_client *clp)
{
	struct task_struct *task;

	__module_get(THIS_MODULE);
	atomic_inc(&clp->cl_count);
	task = kthread_run(nfs_do_expire_all_delegations, clp,
			"%s-delegreturn",
			rpc_peeraddr2str(clp->cl_rpcclient,
				RPC_DISPLAY_ADDR));
	if (!IS_ERR(task))
		return;
	nfs_put_client(clp);
	module_put(THIS_MODULE);
}
/*
 * Return all delegations following an NFS4ERR_CB_PATH_DOWN error.
 */
void nfs_handle_cb_pathdown(struct nfs_client *clp)
{
	struct nfs_delegation *delegation;
	struct inode *inode;

	if (clp == NULL)
		return;
restart:
	rcu_read_lock();
	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
		inode = igrab(delegation->inode);
		if (inode == NULL)
			continue;
		spin_lock(&clp->cl_lock);
		delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL);
		spin_unlock(&clp->cl_lock);
		rcu_read_unlock();
		if (delegation != NULL)
			__nfs_inode_return_delegation(inode, delegation);
		iput(inode);
		goto restart;
	}
	rcu_read_unlock();
}
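/*
 * Arguments handed to the delegation recall thread. 'started' is
 * completed once the delegation has been detached and 'result' is set,
 * so the caller can report back without waiting for the full return.
 */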
struct recall_threadargs {
	struct inode *inode;
	struct nfs_client *clp;
	const nfs4_stateid *stateid;

	struct completion started;
	int result;
};
static int recall_thread(void *data)
{
	struct recall_threadargs *args = (struct recall_threadargs *)data;
	struct inode *inode = igrab(args->inode);
	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;

	daemonize("nfsv4-delegreturn");

	nfs_msync_inode(inode);
	down_read(&clp->cl_sem);
	down_write(&nfsi->rwsem);
	spin_lock(&clp->cl_lock);
	delegation = nfs_detach_delegation_locked(nfsi, args->stateid);
	if (delegation != NULL)
		args->result = 0;
	else
		args->result = -ENOENT;
	spin_unlock(&clp->cl_lock);
	complete(&args->started);
	nfs_delegation_claim_opens(inode, args->stateid);
	up_write(&nfsi->rwsem);
	up_read(&clp->cl_sem);
	nfs_msync_inode(inode);

	if (delegation != NULL)
		nfs_do_return_delegation(inode, delegation, 1);
	iput(inode);
	module_put_and_exit(0);
}
/*
 * Asynchronous delegation recall!
 */
int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid)
{
	struct recall_threadargs data = {
		.inode = inode,
		.stateid = stateid,
	};
	int status;

	init_completion(&data.started);
	__module_get(THIS_MODULE);
	status = kernel_thread(recall_thread, &data, CLONE_KERNEL);
	if (status < 0)
		goto out_module_put;
	wait_for_completion(&data.started);
	return data.result;
out_module_put:
	module_put(THIS_MODULE);
	return status;
}
/*
 * Retrieve the inode associated with a delegation
 */
struct inode *nfs_delegation_find_inode(struct nfs_client *clp, const struct nfs_fh *fhandle)
{
	struct nfs_delegation *delegation;
	struct inode *res = NULL;
	rcu_read_lock();
	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
		if (nfs_compare_fh(fhandle, &NFS_I(delegation->inode)->fh) == 0) {
			res = igrab(delegation->inode);
			break;
		}
	}
	rcu_read_unlock();
	return res;
}
/*
 * Mark all delegations as needing to be reclaimed
 */
void nfs_delegation_mark_reclaim(struct nfs_client *clp)
{
	struct nfs_delegation *delegation;
	rcu_read_lock();
	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list)
		delegation->flags |= NFS_DELEGATION_NEED_RECLAIM;
	rcu_read_unlock();
}
/*
 * Reap all unclaimed delegations after reboot recovery is done
 */
void nfs_delegation_reap_unclaimed(struct nfs_client *clp)
{
	struct nfs_delegation *delegation;
restart:
	rcu_read_lock();
	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
		if ((delegation->flags & NFS_DELEGATION_NEED_RECLAIM) == 0)
			continue;
		spin_lock(&clp->cl_lock);
		delegation = nfs_detach_delegation_locked(NFS_I(delegation->inode), NULL);
		spin_unlock(&clp->cl_lock);
		rcu_read_unlock();
		if (delegation != NULL)
			nfs_free_delegation(delegation);
		goto restart;
	}
	rcu_read_unlock();
}
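/*
 * Copy this inode's delegation stateid into 'dst' for use in a request.
 * Returns 1 if a delegation was found, 0 otherwise.
 */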
int nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;
	int ret = 0;

	rcu_read_lock();
	delegation = rcu_dereference(nfsi->delegation);
	if (delegation != NULL) {
		memcpy(dst->data, delegation->stateid.data, sizeof(dst->data));
		ret = 1;
	}
	rcu_read_unlock();
	return ret;
}