pkg-k5-afs_openafs.git: src/afs/LINUX/osi_vnodeops.c
1 /*
2 * Copyright 2000, International Business Machines Corporation and others.
3 * All Rights Reserved.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
8 */
10 /*
11 * Linux specific vnodeops. Also includes the glue routines required to call
12 * AFS vnodeops.
14 * So far the only truly scary part is that Linux relies on the inode cache
15 * to be up to date. Don't you dare break a callback and expect an fstat
16 * to give you meaningful information. This appears to be fixed in the 2.1
17 * development kernels. As it is we can fix this now by intercepting the
18 * stat calls.
19 */
21 #include <afsconfig.h>
22 #include "afs/param.h"
25 #include "afs/sysincludes.h"
26 #include "afsincludes.h"
27 #include "afs/afs_stats.h"
28 #include <linux/mm.h>
29 #ifdef HAVE_MM_INLINE_H
30 #include <linux/mm_inline.h>
31 #endif
32 #include <linux/pagemap.h>
33 #include <linux/writeback.h>
34 #include <linux/pagevec.h>
35 #include <linux/aio.h>
36 #include "afs/lock.h"
37 #include "afs/afs_bypasscache.h"
39 #include "osi_compat.h"
40 #include "osi_pagecopy.h"
42 #ifndef HAVE_LINUX_PAGEVEC_LRU_ADD_FILE
43 #define __pagevec_lru_add_file __pagevec_lru_add
44 #endif
46 #ifndef MAX_ERRNO
47 #define MAX_ERRNO 1000L
48 #endif
50 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34)
51 /* Enable our workaround for a race with d_splice_alias. The race was fixed in
52 * 2.6.34, so don't do it after that point. */
53 # define D_SPLICE_ALIAS_RACE
54 #endif
56 int cachefs_noreadpage = 0;
58 extern struct backing_dev_info *afs_backing_dev_info;
60 extern struct vcache *afs_globalVp;
62 /* This function converts a positive error code from AFS into a negative
63 * code suitable for passing into the Linux VFS layer. It checks that the
64 * error code is within the permissible bounds for the ERR_PTR mechanism.
65 *
66 * _All_ error codes which come from the AFS layer should be passed through
67 * this function before being returned to the kernel.
68 */
70 static inline int
71 afs_convert_code(int code) {
72 if ((code >= 0) && (code <= MAX_ERRNO))
73 return -code;
74 else
75 return -EIO;
78 /* Linux doesn't require a credp for many functions, and crref is an expensive
79 * operation. This helper function avoids obtaining it for VerifyVCache calls
80 */
82 static inline int
83 afs_linux_VerifyVCache(struct vcache *avc, cred_t **retcred) {
84 cred_t *credp = NULL;
85 struct vrequest *treq = NULL;
86 int code;
88 if (avc->f.states & CStatd) {
89 if (retcred)
90 *retcred = NULL;
91 return 0;
94 credp = crref();
96 code = afs_CreateReq(&treq, credp);
97 if (code == 0) {
98 code = afs_VerifyVCache2(avc, treq);
99 afs_DestroyReq(treq);
102 if (retcred != NULL)
103 *retcred = credp;
104 else
105 crfree(credp);
107 return afs_convert_code(code);
110 #if defined(STRUCT_FILE_OPERATIONS_HAS_READ_ITER) || defined(HAVE_LINUX_GENERIC_FILE_AIO_READ)
111 # if defined(STRUCT_FILE_OPERATIONS_HAS_READ_ITER)
112 static ssize_t
113 afs_linux_read_iter(struct kiocb *iocb, struct iov_iter *iter)
114 # elif defined(LINUX_HAS_NONVECTOR_AIO)
115 static ssize_t
116 afs_linux_aio_read(struct kiocb *iocb, char __user *buf, size_t bufsize,
117 loff_t pos)
118 # else
119 static ssize_t
120 afs_linux_aio_read(struct kiocb *iocb, const struct iovec *buf,
121 unsigned long bufsize, loff_t pos)
122 # endif
124 struct file *fp = iocb->ki_filp;
125 ssize_t code = 0;
126 struct vcache *vcp = VTOAFS(fp->f_dentry->d_inode);
127 # if defined(STRUCT_FILE_OPERATIONS_HAS_READ_ITER)
128 loff_t pos = iocb->ki_pos;
129 unsigned long bufsize = iter->nr_segs;
130 # endif
133 AFS_GLOCK();
134 afs_Trace4(afs_iclSetp, CM_TRACE_AIOREADOP, ICL_TYPE_POINTER, vcp,
135 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(pos), ICL_TYPE_INT32,
136 (afs_int32)bufsize, ICL_TYPE_INT32, 99999);
137 code = afs_linux_VerifyVCache(vcp, NULL);
139 if (code == 0) {
140 /* Linux's FlushPages implementation doesn't ever use credp,
141 * so we optimise by not using it */
142 osi_FlushPages(vcp, NULL); /* ensure stale pages are gone */
143 AFS_GUNLOCK();
144 # if defined(STRUCT_FILE_OPERATIONS_HAS_READ_ITER)
145 code = generic_file_read_iter(iocb, iter);
146 # else
147 code = generic_file_aio_read(iocb, buf, bufsize, pos);
148 # endif
149 AFS_GLOCK();
152 afs_Trace4(afs_iclSetp, CM_TRACE_AIOREADOP, ICL_TYPE_POINTER, vcp,
153 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(pos), ICL_TYPE_INT32,
154 (afs_int32)bufsize, ICL_TYPE_INT32, code);
155 AFS_GUNLOCK();
156 return code;
158 #else
159 static ssize_t
160 afs_linux_read(struct file *fp, char *buf, size_t count, loff_t * offp)
162 ssize_t code = 0;
163 struct vcache *vcp = VTOAFS(fp->f_dentry->d_inode);
165 AFS_GLOCK();
166 afs_Trace4(afs_iclSetp, CM_TRACE_READOP, ICL_TYPE_POINTER, vcp,
167 ICL_TYPE_OFFSET, offp, ICL_TYPE_INT32, count, ICL_TYPE_INT32,
168 99999);
169 code = afs_linux_VerifyVCache(vcp, NULL);
171 if (code == 0) {
172 /* Linux's FlushPages implementation doesn't ever use credp,
173 * so we optimise by not using it */
174 osi_FlushPages(vcp, NULL); /* ensure stale pages are gone */
175 AFS_GUNLOCK();
176 code = do_sync_read(fp, buf, count, offp);
177 AFS_GLOCK();
180 afs_Trace4(afs_iclSetp, CM_TRACE_READOP, ICL_TYPE_POINTER, vcp,
181 ICL_TYPE_OFFSET, offp, ICL_TYPE_INT32, count, ICL_TYPE_INT32,
182 code);
183 AFS_GUNLOCK();
184 return code;
186 #endif
189 /* Now we have integrated VM for writes as well as reads. The generic write
190 * operations also take care of re-positioning the pointer if the file is open
191 * in append mode. Call fake open/close to ensure we do writes of core dumps.
192 */
193 #if defined(STRUCT_FILE_OPERATIONS_HAS_READ_ITER) || defined(HAVE_LINUX_GENERIC_FILE_AIO_READ)
194 # if defined(STRUCT_FILE_OPERATIONS_HAS_READ_ITER)
195 static ssize_t
196 afs_linux_write_iter(struct kiocb *iocb, struct iov_iter *iter)
197 # elif defined(LINUX_HAS_NONVECTOR_AIO)
198 static ssize_t
199 afs_linux_aio_write(struct kiocb *iocb, const char __user *buf, size_t bufsize,
200 loff_t pos)
201 # else
202 static ssize_t
203 afs_linux_aio_write(struct kiocb *iocb, const struct iovec *buf,
204 unsigned long bufsize, loff_t pos)
205 # endif
207 ssize_t code = 0;
208 struct vcache *vcp = VTOAFS(iocb->ki_filp->f_dentry->d_inode);
209 cred_t *credp;
210 # if defined(STRUCT_FILE_OPERATIONS_HAS_READ_ITER)
211 loff_t pos = iocb->ki_pos;
212 unsigned long bufsize = iter->nr_segs;
213 # endif
215 AFS_GLOCK();
217 afs_Trace4(afs_iclSetp, CM_TRACE_AIOWRITEOP, ICL_TYPE_POINTER, vcp,
218 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(pos), ICL_TYPE_INT32,
219 (afs_int32)bufsize, ICL_TYPE_INT32,
220 (iocb->ki_filp->f_flags & O_APPEND) ? 99998 : 99999);
222 code = afs_linux_VerifyVCache(vcp, &credp);
224 ObtainWriteLock(&vcp->lock, 529);
225 afs_FakeOpen(vcp);
226 ReleaseWriteLock(&vcp->lock);
227 if (code == 0) {
228 AFS_GUNLOCK();
229 # if defined(STRUCT_FILE_OPERATIONS_HAS_READ_ITER)
230 code = generic_file_write_iter(iocb, iter);
231 # else
232 code = generic_file_aio_write(iocb, buf, bufsize, pos);
233 # endif
234 AFS_GLOCK();
237 ObtainWriteLock(&vcp->lock, 530);
239 if (vcp->execsOrWriters == 1 && !credp)
240 credp = crref();
242 afs_FakeClose(vcp, credp);
243 ReleaseWriteLock(&vcp->lock);
245 afs_Trace4(afs_iclSetp, CM_TRACE_AIOWRITEOP, ICL_TYPE_POINTER, vcp,
246 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(pos), ICL_TYPE_INT32,
247 (afs_int32)bufsize, ICL_TYPE_INT32, code);
249 if (credp)
250 crfree(credp);
251 AFS_GUNLOCK();
252 return code;
254 #else
255 static ssize_t
256 afs_linux_write(struct file *fp, const char *buf, size_t count, loff_t * offp)
258 ssize_t code = 0;
259 struct vcache *vcp = VTOAFS(fp->f_dentry->d_inode);
260 cred_t *credp;
262 AFS_GLOCK();
264 afs_Trace4(afs_iclSetp, CM_TRACE_WRITEOP, ICL_TYPE_POINTER, vcp,
265 ICL_TYPE_OFFSET, offp, ICL_TYPE_INT32, count, ICL_TYPE_INT32,
266 (fp->f_flags & O_APPEND) ? 99998 : 99999);
268 code = afs_linux_VerifyVCache(vcp, &credp);
270 ObtainWriteLock(&vcp->lock, 529);
271 afs_FakeOpen(vcp);
272 ReleaseWriteLock(&vcp->lock);
273 if (code == 0) {
274 AFS_GUNLOCK();
275 code = do_sync_write(fp, buf, count, offp);
276 AFS_GLOCK();
279 ObtainWriteLock(&vcp->lock, 530);
281 if (vcp->execsOrWriters == 1 && !credp)
282 credp = crref();
284 afs_FakeClose(vcp, credp);
285 ReleaseWriteLock(&vcp->lock);
287 afs_Trace4(afs_iclSetp, CM_TRACE_WRITEOP, ICL_TYPE_POINTER, vcp,
288 ICL_TYPE_OFFSET, offp, ICL_TYPE_INT32, count, ICL_TYPE_INT32,
289 code);
291 if (credp)
292 crfree(credp);
293 AFS_GUNLOCK();
294 return code;
296 #endif
298 extern int BlobScan(struct dcache * afile, afs_int32 ablob, afs_int32 *ablobOut);
300 /* This is a complete rewrite of afs_readdir, since we can make use of
301 * filldir instead of afs_readdir_move. Note that changes to vcache/dcache
302 * handling and use of bulkstats will need to be reflected here as well.
303 */
304 static int
305 #if defined(STRUCT_FILE_OPERATIONS_HAS_ITERATE)
306 afs_linux_readdir(struct file *fp, struct dir_context *ctx)
307 #else
308 afs_linux_readdir(struct file *fp, void *dirbuf, filldir_t filldir)
309 #endif
311 struct vcache *avc = VTOAFS(FILE_INODE(fp));
312 struct vrequest *treq = NULL;
313 struct dcache *tdc;
314 int code;
315 int offset;
316 afs_int32 dirpos;
317 struct DirEntry *de;
318 struct DirBuffer entry;
319 ino_t ino;
320 int len;
321 afs_size_t origOffset, tlen;
322 cred_t *credp = crref();
323 struct afs_fakestat_state fakestat;
325 AFS_GLOCK();
326 AFS_STATCNT(afs_readdir);
328 code = afs_convert_code(afs_CreateReq(&treq, credp));
329 crfree(credp);
330 if (code)
331 goto out1;
333 afs_InitFakeStat(&fakestat);
334 code = afs_convert_code(afs_EvalFakeStat(&avc, &fakestat, treq));
335 if (code)
336 goto out;
338 /* update the cache entry */
339 tagain:
340 code = afs_convert_code(afs_VerifyVCache2(avc, treq));
341 if (code)
342 goto out;
344 /* get a reference to the entire directory */
345 tdc = afs_GetDCache(avc, (afs_size_t) 0, treq, &origOffset, &tlen, 1);
346 len = tlen;
347 if (!tdc) {
348 code = -EIO;
349 goto out;
351 ObtainWriteLock(&avc->lock, 811);
352 ObtainReadLock(&tdc->lock);
353 /*
354 * Make sure that the data in the cache is current. There are two
355 * cases we need to worry about:
356 * 1. The cache data is being fetched by another process.
357 * 2. The cache data is no longer valid
358 */
359 while ((avc->f.states & CStatd)
360 && (tdc->dflags & DFFetching)
361 && hsame(avc->f.m.DataVersion, tdc->f.versionNo)) {
362 ReleaseReadLock(&tdc->lock);
363 ReleaseWriteLock(&avc->lock);
364 afs_osi_Sleep(&tdc->validPos);
365 ObtainWriteLock(&avc->lock, 812);
366 ObtainReadLock(&tdc->lock);
368 if (!(avc->f.states & CStatd)
369 || !hsame(avc->f.m.DataVersion, tdc->f.versionNo)) {
370 ReleaseReadLock(&tdc->lock);
371 ReleaseWriteLock(&avc->lock);
372 afs_PutDCache(tdc);
373 goto tagain;
376 /* Set the readdir-in-progress flag, and downgrade the lock
377 * to shared so others will be able to acquire a read lock.
378 */
379 avc->f.states |= CReadDir;
380 avc->dcreaddir = tdc;
381 avc->readdir_pid = MyPidxx2Pid(MyPidxx);
382 ConvertWToSLock(&avc->lock);
384 /* Fill in until we get an error or we're done. This implementation
385 * takes an offset in units of blobs, rather than bytes.
386 */
387 code = 0;
388 #if defined(STRUCT_FILE_OPERATIONS_HAS_ITERATE)
389 offset = ctx->pos;
390 #else
391 offset = (int) fp->f_pos;
392 #endif
393 while (1) {
394 code = BlobScan(tdc, offset, &dirpos);
395 if (code || !dirpos)
396 break;
398 code = afs_dir_GetVerifiedBlob(tdc, dirpos, &entry);
399 if (code) {
400 if (!(avc->f.states & CCorrupt)) {
401 struct cell *tc = afs_GetCellStale(avc->f.fid.Cell, READ_LOCK);
402 afs_warn("afs: Corrupt directory (%d.%d.%d.%d [%s] @%lx, pos %d)\n",
403 avc->f.fid.Cell, avc->f.fid.Fid.Volume,
404 avc->f.fid.Fid.Vnode, avc->f.fid.Fid.Unique,
405 tc ? tc->cellName : "",
406 (unsigned long)&tdc->f.inode, dirpos);
407 if (tc)
408 afs_PutCell(tc, READ_LOCK);
409 UpgradeSToWLock(&avc->lock, 814);
410 avc->f.states |= CCorrupt;
412 code = -EIO;
413 goto unlock_out;
416 de = (struct DirEntry *)entry.data;
417 ino = afs_calc_inum (avc->f.fid.Cell, avc->f.fid.Fid.Volume,
418 ntohl(de->fid.vnode));
419 len = strlen(de->name);
421 /* filldir returns -EINVAL when the buffer is full. */
423 unsigned int type = DT_UNKNOWN;
424 struct VenusFid afid;
425 struct vcache *tvc;
426 int vtype;
427 afid.Cell = avc->f.fid.Cell;
428 afid.Fid.Volume = avc->f.fid.Fid.Volume;
429 afid.Fid.Vnode = ntohl(de->fid.vnode);
430 afid.Fid.Unique = ntohl(de->fid.vunique);
431 if ((avc->f.states & CForeign) == 0 && (ntohl(de->fid.vnode) & 1)) {
432 type = DT_DIR;
433 } else if ((tvc = afs_FindVCache(&afid, 0, 0))) {
434 if (tvc->mvstat != AFS_MVSTAT_FILE) {
435 type = DT_DIR;
436 } else if (((tvc->f.states) & (CStatd | CTruth))) {
437 /* CTruth will be set if the object has
438 *ever* been statd */
439 vtype = vType(tvc);
440 if (vtype == VDIR)
441 type = DT_DIR;
442 else if (vtype == VREG)
443 type = DT_REG;
444 /* Don't do this until we're sure it can't be a mtpt */
445 /* else if (vtype == VLNK)
446 * type=DT_LNK; */
447 /* what other types does AFS support? */
449 /* clean up from afs_FindVCache */
450 afs_PutVCache(tvc);
452 /*
453 * If this is NFS readdirplus, then the filler is going to
454 * call getattr on this inode, which will deadlock if we're
455 * holding the GLOCK.
456 */
457 AFS_GUNLOCK();
458 #if defined(STRUCT_FILE_OPERATIONS_HAS_ITERATE)
459 /* dir_emit returns a bool - true when it succeeds.
460 * Invert the result to fit with how we check "code" */
461 code = !dir_emit(ctx, de->name, len, ino, type);
462 #else
463 code = (*filldir) (dirbuf, de->name, len, offset, ino, type);
464 #endif
465 AFS_GLOCK();
467 DRelease(&entry, 0);
468 if (code)
469 break;
470 offset = dirpos + 1 + ((len + 16) >> 5);
472 /* If filldir didn't fill in the last one this is still pointing to that
473 * last attempt.
474 */
475 code = 0;
477 unlock_out:
478 #if defined(STRUCT_FILE_OPERATIONS_HAS_ITERATE)
479 ctx->pos = (loff_t) offset;
480 #else
481 fp->f_pos = (loff_t) offset;
482 #endif
483 ReleaseReadLock(&tdc->lock);
484 afs_PutDCache(tdc);
485 UpgradeSToWLock(&avc->lock, 813);
486 avc->f.states &= ~CReadDir;
487 avc->dcreaddir = 0;
488 avc->readdir_pid = 0;
489 ReleaseSharedLock(&avc->lock);
491 out:
492 afs_PutFakeStat(&fakestat);
493 afs_DestroyReq(treq);
494 out1:
495 AFS_GUNLOCK();
496 return code;
500 /* in afs_pioctl.c */
501 extern int afs_xioctl(struct inode *ip, struct file *fp, unsigned int com,
502 unsigned long arg);
504 #if defined(HAVE_UNLOCKED_IOCTL) || defined(HAVE_COMPAT_IOCTL)
505 static long afs_unlocked_xioctl(struct file *fp, unsigned int com,
506 unsigned long arg) {
507 return afs_xioctl(FILE_INODE(fp), fp, com, arg);
510 #endif
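/* afs_linux_mmap
 * Validate the vcache and flush any stale pages before handing the mapping
 * off to generic_file_mmap(); on success the vcache is flagged CMAPPED.
 */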
513 static int
514 afs_linux_mmap(struct file *fp, struct vm_area_struct *vmap)
516 struct vcache *vcp = VTOAFS(FILE_INODE(fp));
517 int code;
519 AFS_GLOCK();
520 afs_Trace3(afs_iclSetp, CM_TRACE_GMAP, ICL_TYPE_POINTER, vcp,
521 ICL_TYPE_POINTER, vmap->vm_start, ICL_TYPE_INT32,
522 vmap->vm_end - vmap->vm_start);
524 /* get a validated vcache entry */
525 code = afs_linux_VerifyVCache(vcp, NULL);
527 if (code == 0) {
528 /* Linux's Flushpage implementation doesn't use credp, so optimise
529 * our code to not need to crref() it */
530 osi_FlushPages(vcp, NULL); /* ensure stale pages are gone */
531 AFS_GUNLOCK();
532 code = generic_file_mmap(fp, vmap);
533 AFS_GLOCK();
534 if (!code)
535 vcp->f.states |= CMAPPED;
537 AFS_GUNLOCK();
539 return code;
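/* afs_linux_open
 * Thin wrapper around afs_open(): take the AFS global lock, open the vcache
 * with the caller's credentials, and convert the return code for the VFS.
 */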
542 static int
543 afs_linux_open(struct inode *ip, struct file *fp)
545 struct vcache *vcp = VTOAFS(ip);
546 cred_t *credp = crref();
547 int code;
549 AFS_GLOCK();
550 code = afs_open(&vcp, fp->f_flags, credp);
551 AFS_GUNLOCK();
553 crfree(credp);
554 return afs_convert_code(code);
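/* afs_linux_release
 * Close path: call afs_close() and drop any credential that earlier
 * operations cached on the vcache.
 */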
557 static int
558 afs_linux_release(struct inode *ip, struct file *fp)
560 struct vcache *vcp = VTOAFS(ip);
561 cred_t *credp = crref();
562 int code = 0;
564 AFS_GLOCK();
565 code = afs_close(vcp, fp->f_flags, credp);
566 ObtainWriteLock(&vcp->lock, 807);
567 if (vcp->cred) {
568 crfree(vcp->cred);
569 vcp->cred = NULL;
571 ReleaseWriteLock(&vcp->lock);
572 AFS_GUNLOCK();
574 crfree(credp);
575 return afs_convert_code(code);
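/* afs_linux_fsync
 * Flush dirty data for this file via afs_fsync(). The ranged form of the
 * fsync file operation additionally holds the inode lock for the call.
 */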
578 static int
579 #if defined(FOP_FSYNC_TAKES_DENTRY)
580 afs_linux_fsync(struct file *fp, struct dentry *dp, int datasync)
581 #elif defined(FOP_FSYNC_TAKES_RANGE)
582 afs_linux_fsync(struct file *fp, loff_t start, loff_t end, int datasync)
583 #else
584 afs_linux_fsync(struct file *fp, int datasync)
585 #endif
587 int code;
588 struct inode *ip = FILE_INODE(fp);
589 cred_t *credp = crref();
591 #if defined(FOP_FSYNC_TAKES_RANGE)
592 afs_linux_lock_inode(ip);
593 #endif
594 AFS_GLOCK();
595 code = afs_fsync(VTOAFS(ip), credp);
596 AFS_GUNLOCK();
597 #if defined(FOP_FSYNC_TAKES_RANGE)
598 afs_linux_unlock_inode(ip);
599 #endif
600 crfree(credp);
601 return afs_convert_code(code);
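/* afs_linux_lock
 * POSIX byte-range locking: translate the Linux file_lock into an AFS_FLOCK
 * for afs_lockctl(), mirror successful F_SETLK/F_SETLKW requests into the
 * kernel's posix lock table, and convert the result back for the caller.
 */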
606 static int
607 afs_linux_lock(struct file *fp, int cmd, struct file_lock *flp)
609 int code = 0;
610 struct vcache *vcp = VTOAFS(FILE_INODE(fp));
611 cred_t *credp = crref();
612 struct AFS_FLOCK flock;
614 /* Convert to a lock format afs_lockctl understands. */
615 memset(&flock, 0, sizeof(flock));
616 flock.l_type = flp->fl_type;
617 flock.l_pid = flp->fl_pid;
618 flock.l_whence = 0;
619 flock.l_start = flp->fl_start;
620 if (flp->fl_end == OFFSET_MAX)
621 flock.l_len = 0; /* Lock to end of file */
622 else
623 flock.l_len = flp->fl_end - flp->fl_start + 1;
625 /* Safe because there are no large files, yet */
626 #if defined(F_GETLK64) && (F_GETLK != F_GETLK64)
627 if (cmd == F_GETLK64)
628 cmd = F_GETLK;
629 else if (cmd == F_SETLK64)
630 cmd = F_SETLK;
631 else if (cmd == F_SETLKW64)
632 cmd = F_SETLKW;
633 #endif /* F_GETLK64 && F_GETLK != F_GETLK64 */
635 AFS_GLOCK();
636 code = afs_convert_code(afs_lockctl(vcp, &flock, cmd, credp));
637 AFS_GUNLOCK();
639 if ((code == 0 || flp->fl_type == F_UNLCK) &&
640 (cmd == F_SETLK || cmd == F_SETLKW)) {
641 code = afs_posix_lock_file(fp, flp);
642 if (code && flp->fl_type != F_UNLCK) {
643 struct AFS_FLOCK flock2;
644 flock2 = flock;
645 flock2.l_type = F_UNLCK;
646 AFS_GLOCK();
647 afs_lockctl(vcp, &flock2, F_SETLK, credp);
648 AFS_GUNLOCK();
651 /* If lockctl says there are no conflicting locks, then also check with the
652 * kernel, as lockctl knows nothing about byte range locks
653 */
654 if (code == 0 && cmd == F_GETLK && flock.l_type == F_UNLCK) {
655 afs_posix_test_lock(fp, flp);
656 /* If we found a lock in the kernel's structure, return it */
657 if (flp->fl_type != F_UNLCK) {
658 crfree(credp);
659 return 0;
663 /* Convert flock back to Linux's file_lock */
664 flp->fl_type = flock.l_type;
665 flp->fl_pid = flock.l_pid;
666 flp->fl_start = flock.l_start;
667 if (flock.l_len == 0)
668 flp->fl_end = OFFSET_MAX; /* Lock to end of file */
669 else
670 flp->fl_end = flock.l_start + flock.l_len - 1;
672 crfree(credp);
673 return code;
676 #ifdef STRUCT_FILE_OPERATIONS_HAS_FLOCK
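/* afs_linux_flock
 * flock(2)-style whole-file locks. Like afs_linux_lock(), but the lock
 * always covers the entire file and is recorded with flock_lock_file_wait()
 * instead of the posix lock table.
 */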
677 static int
678 afs_linux_flock(struct file *fp, int cmd, struct file_lock *flp) {
679 int code = 0;
680 struct vcache *vcp = VTOAFS(FILE_INODE(fp));
681 cred_t *credp = crref();
682 struct AFS_FLOCK flock;
683 /* Convert to a lock format afs_lockctl understands. */
684 memset(&flock, 0, sizeof(flock));
685 flock.l_type = flp->fl_type;
686 flock.l_pid = flp->fl_pid;
687 flock.l_whence = 0;
688 flock.l_start = 0;
689 flock.l_len = 0;
691 /* Safe because there are no large files, yet */
692 #if defined(F_GETLK64) && (F_GETLK != F_GETLK64)
693 if (cmd == F_GETLK64)
694 cmd = F_GETLK;
695 else if (cmd == F_SETLK64)
696 cmd = F_SETLK;
697 else if (cmd == F_SETLKW64)
698 cmd = F_SETLKW;
699 #endif /* F_GETLK64 && F_GETLK != F_GETLK64 */
701 AFS_GLOCK();
702 code = afs_convert_code(afs_lockctl(vcp, &flock, cmd, credp));
703 AFS_GUNLOCK();
705 if ((code == 0 || flp->fl_type == F_UNLCK) &&
706 (cmd == F_SETLK || cmd == F_SETLKW)) {
707 flp->fl_flags &=~ FL_SLEEP;
708 code = flock_lock_file_wait(fp, flp);
709 if (code && flp->fl_type != F_UNLCK) {
710 struct AFS_FLOCK flock2;
711 flock2 = flock;
712 flock2.l_type = F_UNLCK;
713 AFS_GLOCK();
714 afs_lockctl(vcp, &flock2, F_SETLK, credp);
715 AFS_GUNLOCK();
718 /* Convert flock back to Linux's file_lock */
719 flp->fl_type = flock.l_type;
720 flp->fl_pid = flock.l_pid;
722 crfree(credp);
723 return code;
725 #endif
727 /* afs_linux_flush
728 * essentially the same as afs_fsync() but we need to get the return
729 * code for the sys_close() here, not afs_linux_release(), so call
730 * afs_StoreAllSegments() with AFS_LASTSTORE
731 */
732 static int
733 #if defined(FOP_FLUSH_TAKES_FL_OWNER_T)
734 afs_linux_flush(struct file *fp, fl_owner_t id)
735 #else
736 afs_linux_flush(struct file *fp)
737 #endif
739 struct vrequest *treq = NULL;
740 struct vcache *vcp;
741 cred_t *credp;
742 int code;
743 int bypasscache = 0;
745 AFS_GLOCK();
747 if ((fp->f_flags & O_ACCMODE) == O_RDONLY) { /* readers dont flush */
748 AFS_GUNLOCK();
749 return 0;
752 AFS_DISCON_LOCK();
754 credp = crref();
755 vcp = VTOAFS(FILE_INODE(fp));
757 code = afs_CreateReq(&treq, credp);
758 if (code)
759 goto out;
760 /* If caching is bypassed for this file, or globally, just return 0 */
761 if (cache_bypass_strategy == ALWAYS_BYPASS_CACHE)
762 bypasscache = 1;
763 else {
764 ObtainReadLock(&vcp->lock);
765 if (vcp->cachingStates & FCSBypass)
766 bypasscache = 1;
767 ReleaseReadLock(&vcp->lock);
769 if (bypasscache) {
770 /* future proof: don't rely on 0 return from afs_InitReq */
771 code = 0;
772 goto out;
775 ObtainSharedLock(&vcp->lock, 535);
776 if ((vcp->execsOrWriters > 0) && (file_count(fp) == 1)) {
777 UpgradeSToWLock(&vcp->lock, 536);
778 if (!AFS_IS_DISCONNECTED) {
779 code = afs_StoreAllSegments(vcp,
780 treq,
781 AFS_SYNC | AFS_LASTSTORE);
782 } else {
783 afs_DisconAddDirty(vcp, VDisconWriteOsiFlush, 1);
785 ConvertWToSLock(&vcp->lock);
787 code = afs_CheckCode(code, treq, 54);
788 ReleaseSharedLock(&vcp->lock);
790 out:
791 afs_DestroyReq(treq);
792 AFS_DISCON_UNLOCK();
793 AFS_GUNLOCK();
795 crfree(credp);
796 return afs_convert_code(code);
799 struct file_operations afs_dir_fops = {
800 .read = generic_read_dir,
801 #if defined(STRUCT_FILE_OPERATIONS_HAS_ITERATE)
802 .iterate = afs_linux_readdir,
803 #else
804 .readdir = afs_linux_readdir,
805 #endif
806 #ifdef HAVE_UNLOCKED_IOCTL
807 .unlocked_ioctl = afs_unlocked_xioctl,
808 #else
809 .ioctl = afs_xioctl,
810 #endif
811 #ifdef HAVE_COMPAT_IOCTL
812 .compat_ioctl = afs_unlocked_xioctl,
813 #endif
814 .open = afs_linux_open,
815 .release = afs_linux_release,
816 .llseek = default_llseek,
817 #ifdef HAVE_LINUX_NOOP_FSYNC
818 .fsync = noop_fsync,
819 #else
820 .fsync = simple_sync_file,
821 #endif
824 struct file_operations afs_file_fops = {
825 #ifdef STRUCT_FILE_OPERATIONS_HAS_READ_ITER
826 .read_iter = afs_linux_read_iter,
827 .write_iter = afs_linux_write_iter,
828 # if !defined(HAVE_LINUX___VFS_WRITE) && !defined(HAVE_LINUX_KERNEL_WRITE)
829 .read = new_sync_read,
830 .write = new_sync_write,
831 # endif
832 #elif defined(HAVE_LINUX_GENERIC_FILE_AIO_READ)
833 .aio_read = afs_linux_aio_read,
834 .aio_write = afs_linux_aio_write,
835 .read = do_sync_read,
836 .write = do_sync_write,
837 #else
838 .read = afs_linux_read,
839 .write = afs_linux_write,
840 #endif
841 #ifdef HAVE_UNLOCKED_IOCTL
842 .unlocked_ioctl = afs_unlocked_xioctl,
843 #else
844 .ioctl = afs_xioctl,
845 #endif
846 #ifdef HAVE_COMPAT_IOCTL
847 .compat_ioctl = afs_unlocked_xioctl,
848 #endif
849 .mmap = afs_linux_mmap,
850 .open = afs_linux_open,
851 .flush = afs_linux_flush,
852 #if defined(STRUCT_FILE_OPERATIONS_HAS_SENDFILE)
853 .sendfile = generic_file_sendfile,
854 #endif
855 #if defined(STRUCT_FILE_OPERATIONS_HAS_SPLICE) && !defined(HAVE_LINUX_DEFAULT_FILE_SPLICE_READ)
856 # if defined(HAVE_LINUX_ITER_FILE_SPLICE_WRITE)
857 .splice_write = iter_file_splice_write,
858 # else
859 .splice_write = generic_file_splice_write,
860 # endif
861 .splice_read = generic_file_splice_read,
862 #endif
863 .release = afs_linux_release,
864 .fsync = afs_linux_fsync,
865 .lock = afs_linux_lock,
866 #ifdef STRUCT_FILE_OPERATIONS_HAS_FLOCK
867 .flock = afs_linux_flock,
868 #endif
869 .llseek = default_llseek,
872 static struct dentry *
873 canonical_dentry(struct inode *ip)
875 struct vcache *vcp = VTOAFS(ip);
876 struct dentry *first = NULL, *ret = NULL, *cur;
877 #if defined(D_ALIAS_IS_HLIST) && !defined(HLIST_ITERATOR_NO_NODE)
878 struct hlist_node *p;
879 #endif
881 /* general strategy:
882 * if vcp->target_link is set, and can be found in ip->i_dentry, use that.
883 * otherwise, use the first dentry in ip->i_dentry.
884 * if ip->i_dentry is empty, use the 'dentry' argument we were given.
885 */
886 /* note that vcp->target_link specifies which dentry to use, but we have
887 * no reference held on that dentry. so, we cannot use or dereference
888 * vcp->target_link itself, since it may have been freed. instead, we only
889 * use it to compare to pointers in the ip->i_dentry list. */
891 d_prune_aliases(ip);
893 afs_d_alias_lock(ip);
895 #if defined(D_ALIAS_IS_HLIST)
896 # if defined(HLIST_ITERATOR_NO_NODE)
897 hlist_for_each_entry(cur, &ip->i_dentry, d_alias) {
898 # else
899 hlist_for_each_entry(cur, p, &ip->i_dentry, d_alias) {
900 # endif
901 #else
902 list_for_each_entry_reverse(cur, &ip->i_dentry, d_alias) {
903 #endif
905 if (!vcp->target_link || cur == vcp->target_link) {
906 ret = cur;
907 break;
910 if (!first) {
911 first = cur;
914 if (!ret && first) {
915 ret = first;
918 vcp->target_link = ret;
920 if (ret) {
921 afs_linux_dget(ret);
923 afs_d_alias_unlock(ip);
925 return ret;
928 /**********************************************************************
929 * AFS Linux dentry operations
930 **********************************************************************/
932 /* afs_linux_revalidate
933 * Ensure vcache is stat'd before use. Return 0 if entry is valid.
934 */
935 static int
936 afs_linux_revalidate(struct dentry *dp)
938 struct vattr *vattr = NULL;
939 struct vcache *vcp = VTOAFS(dp->d_inode);
940 cred_t *credp;
941 int code;
943 if (afs_shuttingdown != AFS_RUNNING)
944 return EIO;
946 AFS_GLOCK();
948 code = afs_CreateAttr(&vattr);
949 if (code) {
950 goto out;
953 /* This avoids the crref when we don't have to do it. Watch for
954 * changes in afs_getattr that don't get replicated here!
955 */
956 if (vcp->f.states & CStatd &&
957 (!afs_fakestat_enable || vcp->mvstat != AFS_MVSTAT_MTPT) &&
958 !afs_nfsexporter &&
959 (vType(vcp) == VDIR || vType(vcp) == VLNK)) {
960 code = afs_CopyOutAttrs(vcp, vattr);
961 } else {
962 credp = crref();
963 code = afs_getattr(vcp, vattr, credp);
964 crfree(credp);
967 if (!code)
968 afs_fill_inode(AFSTOV(vcp), vattr);
970 afs_DestroyAttr(vattr);
972 out:
973 AFS_GUNLOCK();
975 return afs_convert_code(code);
978 /* iattr2vattr
979 * Set iattr data into vattr. Assume vattr cleared before call.
980 */
981 static void
982 iattr2vattr(struct vattr *vattrp, struct iattr *iattrp)
984 vattrp->va_mask = iattrp->ia_valid;
985 if (iattrp->ia_valid & ATTR_MODE)
986 vattrp->va_mode = iattrp->ia_mode;
987 if (iattrp->ia_valid & ATTR_UID)
988 vattrp->va_uid = afs_from_kuid(iattrp->ia_uid);
989 if (iattrp->ia_valid & ATTR_GID)
990 vattrp->va_gid = afs_from_kgid(iattrp->ia_gid);
991 if (iattrp->ia_valid & ATTR_SIZE)
992 vattrp->va_size = iattrp->ia_size;
993 if (iattrp->ia_valid & ATTR_ATIME) {
994 vattrp->va_atime.tv_sec = iattrp->ia_atime.tv_sec;
995 vattrp->va_atime.tv_usec = 0;
997 if (iattrp->ia_valid & ATTR_MTIME) {
998 vattrp->va_mtime.tv_sec = iattrp->ia_mtime.tv_sec;
999 vattrp->va_mtime.tv_usec = 0;
1001 if (iattrp->ia_valid & ATTR_CTIME) {
1002 vattrp->va_ctime.tv_sec = iattrp->ia_ctime.tv_sec;
1003 vattrp->va_ctime.tv_usec = 0;
1007 /* vattr2inode
1008 * Rewrite the inode cache from the attr. Assumes all vattr fields are valid.
1009 */
1010 void
1011 vattr2inode(struct inode *ip, struct vattr *vp)
1013 ip->i_ino = vp->va_nodeid;
1014 #ifdef HAVE_LINUX_SET_NLINK
1015 set_nlink(ip, vp->va_nlink);
1016 #else
1017 ip->i_nlink = vp->va_nlink;
1018 #endif
1019 ip->i_blocks = vp->va_blocks;
1020 #ifdef STRUCT_INODE_HAS_I_BLKBITS
1021 ip->i_blkbits = AFS_BLKBITS;
1022 #endif
1023 #ifdef STRUCT_INODE_HAS_I_BLKSIZE
1024 ip->i_blksize = vp->va_blocksize;
1025 #endif
1026 ip->i_rdev = vp->va_rdev;
1027 ip->i_mode = vp->va_mode;
1028 ip->i_uid = afs_make_kuid(vp->va_uid);
1029 ip->i_gid = afs_make_kgid(vp->va_gid);
1030 i_size_write(ip, vp->va_size);
1031 ip->i_atime.tv_sec = vp->va_atime.tv_sec;
1032 ip->i_atime.tv_nsec = 0;
1033 ip->i_mtime.tv_sec = vp->va_mtime.tv_sec;
1034 /* Set the mtime nanoseconds to the sysname generation number.
1035 * This convinces NFS clients that all directories have changed
1036 * any time the sysname list changes.
1037 */
1038 ip->i_mtime.tv_nsec = afs_sysnamegen;
1039 ip->i_ctime.tv_sec = vp->va_ctime.tv_sec;
1040 ip->i_ctime.tv_nsec = 0;
1043 /* afs_notify_change
1044 * Linux version of setattr call. What to change is in the iattr struct.
1045 * We need to set bits in both the Linux inode as well as the vcache.
1046 */
1047 static int
1048 afs_notify_change(struct dentry *dp, struct iattr *iattrp)
1050 struct vattr *vattr = NULL;
1051 cred_t *credp = crref();
1052 struct inode *ip = dp->d_inode;
1053 int code;
1055 AFS_GLOCK();
1056 code = afs_CreateAttr(&vattr);
1057 if (code) {
1058 goto out;
1061 iattr2vattr(vattr, iattrp); /* Convert for AFS vnodeops call. */
1063 code = afs_setattr(VTOAFS(ip), vattr, credp);
1064 if (!code) {
1065 afs_getattr(VTOAFS(ip), vattr, credp);
1066 vattr2inode(ip, vattr);
1068 afs_DestroyAttr(vattr);
1070 out:
1071 AFS_GUNLOCK();
1072 crfree(credp);
1073 return afs_convert_code(code);
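/* afs_linux_getattr
 * stat(2) entry point: revalidate the vcache so the inode is current, then
 * let generic_fillattr() copy the inode into the kstat.
 */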
1076 #if defined(IOP_GETATTR_TAKES_PATH_STRUCT)
1077 static int
1078 afs_linux_getattr(const struct path *path, struct kstat *stat, u32 request_mask, unsigned int sync_mode)
1080 int err = afs_linux_revalidate(path->dentry);
1081 if (!err) {
1082 generic_fillattr(path->dentry->d_inode, stat);
1084 return err;
1086 #else
1087 static int
1088 afs_linux_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
1090 int err = afs_linux_revalidate(dentry);
1091 if (!err) {
1092 generic_fillattr(dentry->d_inode, stat);
1094 return err;
1096 #endif
1098 static afs_uint32
1099 parent_vcache_dv(struct inode *inode, cred_t *credp)
1101 int free_cred = 0;
1102 struct vcache *pvcp;
1104 /*
1105 * If parent is a mount point and we are using fakestat, we may need
1106 * to look at the fake vcache entry instead of what the vfs is giving
1107 * us. The fake entry is the one with the useful DataVersion.
1108 */
1109 pvcp = VTOAFS(inode);
1110 if (pvcp->mvstat == AFS_MVSTAT_MTPT && afs_fakestat_enable) {
1111 struct vrequest treq;
1112 struct afs_fakestat_state fakestate;
1114 if (!credp) {
1115 credp = crref();
1116 free_cred = 1;
1118 afs_InitReq(&treq, credp);
1119 afs_InitFakeStat(&fakestate);
1120 afs_TryEvalFakeStat(&pvcp, &fakestate, &treq);
1121 if (free_cred)
1122 crfree(credp);
1123 afs_PutFakeStat(&fakestate);
1125 return hgetlo(pvcp->f.m.DataVersion);
1128 #ifdef D_SPLICE_ALIAS_RACE
1129 /* Leave some trace that this code is enabled; otherwise it's pretty hard to
1130 * tell. */
1131 static __attribute__((used)) const char dentry_race_marker[] = "d_splice_alias race workaround enabled";
1133 static int
1134 check_dentry_race(struct dentry *dp)
1136 int raced = 0;
1137 if (!dp->d_inode) {
1138 struct dentry *parent = dget_parent(dp);
1140 /* In Linux, before commit 4919c5e45a91b5db5a41695fe0357fbdff0d5767,
1141 * d_splice_alias can momentarily hash a dentry before it's fully
1142 * populated. This only happens for a moment, since it's unhashed again
1143 * right after (in d_move), but this can make the dentry be found by
1144 * __d_lookup, and then given to us.
1146 * So check if the dentry is unhashed; if it is, then the dentry is not
1147 * valid. We lock the parent inode to ensure that d_splice_alias is no
1148 * longer running (the inode mutex will be held during
1149 * afs_linux_lookup). Locking d_lock is required to check the dentry's
1150 * flags, so lock that, too.
1151 */
1152 afs_linux_lock_inode(parent->d_inode);
1153 spin_lock(&dp->d_lock);
1154 if (d_unhashed(dp)) {
1155 raced = 1;
1157 spin_unlock(&dp->d_lock);
1158 afs_linux_unlock_inode(parent->d_inode);
1160 dput(parent);
1162 return raced;
1164 #endif /* D_SPLICE_ALIAS_RACE */
1166 /* Validate a dentry. Return 1 if unchanged, 0 if VFS layer should re-evaluate.
1167 * In kernels 2.2.10 and above, we are passed an additional flags var which
1168 * may have either the LOOKUP_FOLLOW OR LOOKUP_DIRECTORY set in which case
1169 * we are advised to follow the entry if it is a link or to make sure that
1170 * it is a directory. But since the kernel itself checks these possibilities
1171 * later on, we shouldn't have to do it until later. Perhaps in the future..
1173 * The code here assumes that on entry the global lock is not held
1174 */
1175 static int
1176 #if defined(DOP_REVALIDATE_TAKES_UNSIGNED)
1177 afs_linux_dentry_revalidate(struct dentry *dp, unsigned int flags)
1178 #elif defined(DOP_REVALIDATE_TAKES_NAMEIDATA)
1179 afs_linux_dentry_revalidate(struct dentry *dp, struct nameidata *nd)
1180 #else
1181 afs_linux_dentry_revalidate(struct dentry *dp, int flags)
1182 #endif
1184 cred_t *credp = NULL;
1185 struct vcache *vcp, *pvcp, *tvc = NULL;
1186 struct dentry *parent;
1187 int valid;
1188 struct afs_fakestat_state fakestate;
1189 int force_drop = 0;
1190 afs_uint32 parent_dv;
1192 #ifdef LOOKUP_RCU
1193 /* We don't support RCU path walking */
1194 # if defined(DOP_REVALIDATE_TAKES_UNSIGNED)
1195 if (flags & LOOKUP_RCU)
1196 # else
1197 if (nd->flags & LOOKUP_RCU)
1198 # endif
1199 return -ECHILD;
1200 #endif
1202 #ifdef D_SPLICE_ALIAS_RACE
1203 if (check_dentry_race(dp)) {
1204 valid = 0;
1205 return valid;
1207 #endif
1209 AFS_GLOCK();
1210 afs_InitFakeStat(&fakestate);
1212 if (dp->d_inode) {
1213 vcp = VTOAFS(dp->d_inode);
1215 if (vcp == afs_globalVp)
1216 goto good_dentry;
1218 if (vcp->mvstat == AFS_MVSTAT_MTPT) {
1219 if (vcp->mvid.target_root && (vcp->f.states & CMValid)) {
1220 int tryEvalOnly = 0;
1221 int code = 0;
1222 struct vrequest *treq = NULL;
1224 credp = crref();
1226 code = afs_CreateReq(&treq, credp);
1227 if (code) {
1228 goto bad_dentry;
1230 if ((strcmp(dp->d_name.name, ".directory") == 0)) {
1231 tryEvalOnly = 1;
1233 if (tryEvalOnly)
1234 code = afs_TryEvalFakeStat(&vcp, &fakestate, treq);
1235 else
1236 code = afs_EvalFakeStat(&vcp, &fakestate, treq);
1237 afs_DestroyReq(treq);
1238 if ((tryEvalOnly && vcp->mvstat == AFS_MVSTAT_MTPT) || code) {
1239 /* a mount point, not yet replaced by its directory */
1240 goto bad_dentry;
1243 } else if (vcp->mvstat == AFS_MVSTAT_ROOT && *dp->d_name.name != '/') {
1244 osi_Assert(vcp->mvid.parent != NULL);
1247 #ifdef notdef
1248 /* If the last looker changes, we should make sure the current
1249 * looker still has permission to examine this file. This would
1250 * always require a crref() which would be "slow".
1252 if (vcp->last_looker != treq.uid) {
1253 if (!afs_AccessOK(vcp, (vType(vcp) == VREG) ? PRSFS_READ : PRSFS_LOOKUP, &treq, CHECK_MODE_BITS)) {
1254 goto bad_dentry;
1257 vcp->last_looker = treq.uid;
1259 #endif
1261 parent = dget_parent(dp);
1262 pvcp = VTOAFS(parent->d_inode);
1263 parent_dv = parent_vcache_dv(parent->d_inode, credp);
1265 /* If the parent's DataVersion has changed or the vnode
1266 * is no longer valid, we need to do a full lookup. VerifyVCache
1267 * isn't enough since the vnode may have been renamed.
1268 */
1270 if (parent_dv > dp->d_time || !(vcp->f.states & CStatd)) {
1271 struct vattr *vattr = NULL;
1272 int code;
1273 int lookup_good;
1275 if (credp == NULL) {
1276 credp = crref();
1278 code = afs_lookup(pvcp, (char *)dp->d_name.name, &tvc, credp);
1280 if (code) {
1281 /* We couldn't perform the lookup, so we're not okay. */
1282 lookup_good = 0;
1284 } else if (tvc == vcp) {
1285 /* We got back the same vcache, so we're good. */
1286 lookup_good = 1;
1288 } else if (tvc == VTOAFS(dp->d_inode)) {
1289 /* We got back the same vcache, so we're good. This is
1290 * different from the above case, because sometimes 'vcp' is
1291 * not the same as the vcache for dp->d_inode, if 'vcp' was a
1292 * mtpt and we evaluated it to a root dir. In rare cases,
1293 * afs_lookup might not evaluate the mtpt when we do, or vice
1294 * versa, so the previous case will not succeed. But this is
1295 * still 'correct', so make sure not to mark the dentry as
1296 * invalid; it still points to the same thing! */
1297 lookup_good = 1;
1299 } else {
1300 /* We got back a different file, so we're definitely not
1301 * okay. */
1302 lookup_good = 0;
1305 if (!lookup_good) {
1306 dput(parent);
1307 /* Force unhash; the name doesn't point to this file
1308 * anymore. */
1309 force_drop = 1;
1310 if (code && code != ENOENT) {
1311 /* ...except if we couldn't perform the actual lookup,
1312 * we don't know if the name points to this file or not. */
1313 force_drop = 0;
1315 goto bad_dentry;
1318 code = afs_CreateAttr(&vattr);
1319 if (code) {
1320 dput(parent);
1321 goto bad_dentry;
1324 if (afs_getattr(vcp, vattr, credp)) {
1325 dput(parent);
1326 afs_DestroyAttr(vattr);
1327 goto bad_dentry;
1330 vattr2inode(AFSTOV(vcp), vattr);
1331 dp->d_time = parent_dv;
1333 afs_DestroyAttr(vattr);
1336 /* should we always update the attributes at this point? */
1337 /* unlikely--the vcache entry hasn't changed */
1339 dput(parent);
1341 } else {
1343 /* 'dp' represents a cached negative lookup. */
1345 parent = dget_parent(dp);
1346 pvcp = VTOAFS(parent->d_inode);
1347 parent_dv = parent_vcache_dv(parent->d_inode, credp);
1349 if (parent_dv > dp->d_time || !(pvcp->f.states & CStatd)
1350 || afs_IsDynroot(pvcp)) {
1351 dput(parent);
1352 goto bad_dentry;
1355 dput(parent);
1358 good_dentry:
1359 valid = 1;
1360 goto done;
1362 bad_dentry:
1363 valid = 0;
1364 #ifndef D_INVALIDATE_IS_VOID
1365 /* When (v3.18) d_invalidate was converted to void, it also started
1366 * being called automatically from revalidate, and automatically
1367 * handled:
1368 * - shrink_dcache_parent
1369 * - automatic detach of submounts
1370 * - d_drop
1371 * Therefore, after that point, OpenAFS revalidate logic no longer needs
1372 * to do any of those things itself for invalid dentry structs. We only need
1373 * to tell VFS it's invalid (by returning 0), and VFS will handle the rest.
1374 */
1375 if (have_submounts(dp))
1376 valid = 1;
1377 #endif
1379 done:
1380 /* Clean up */
1381 if (tvc)
1382 afs_PutVCache(tvc);
1383 afs_PutFakeStat(&fakestate);
1384 AFS_GUNLOCK();
1385 if (credp)
1386 crfree(credp);
1388 #ifndef D_INVALIDATE_IS_VOID
1389 if (!valid) {
1390 /*
1391 * If we had a negative lookup for the name we want to forcibly
1392 * unhash the dentry.
1393 * Otherwise use d_invalidate which will not unhash it if still in use.
1394 */
1395 if (force_drop) {
1396 shrink_dcache_parent(dp);
1397 d_drop(dp);
1398 } else
1399 d_invalidate(dp);
1401 #endif
1402 return valid;
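/* afs_dentry_iput
 * Called when the last dentry reference to an inode is dropped: let the
 * vcache go inactive where appropriate, clear any nfsfs-renamed state on
 * the dentry, and release the inode.
 */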
1406 static void
1407 afs_dentry_iput(struct dentry *dp, struct inode *ip)
1409 struct vcache *vcp = VTOAFS(ip);
1411 AFS_GLOCK();
1412 if (!AFS_IS_DISCONNECTED || (vcp->f.states & CUnlinked)) {
1413 (void) afs_InactiveVCache(vcp, NULL);
1415 AFS_GUNLOCK();
1416 afs_linux_clear_nfsfs_renamed(dp);
1418 iput(ip);
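/* afs_dentry_delete
 * Tell the dcache to discard, rather than cache, dentries whose vcache is
 * marked CUnlinked (files removed while still open).
 */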
1421 static int
1422 #if defined(DOP_D_DELETE_TAKES_CONST)
1423 afs_dentry_delete(const struct dentry *dp)
1424 #else
1425 afs_dentry_delete(struct dentry *dp)
1426 #endif
1428 if (dp->d_inode && (VTOAFS(dp->d_inode)->f.states & CUnlinked))
1429 return 1; /* bad inode? */
1431 return 0;
1434 #ifdef STRUCT_DENTRY_OPERATIONS_HAS_D_AUTOMOUNT
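/* afs_dentry_automount
 * d_automount handler: swap the mounted-on dentry for the inode's canonical
 * alias (see canonical_dentry above), or clear DCACHE_NEED_AUTOMOUNT if no
 * such alias exists.
 */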
1435 static struct vfsmount *
1436 afs_dentry_automount(afs_linux_path_t *path)
1438 struct dentry *target;
1440 /*
1441 * Avoid symlink resolution limits when resolving; we cannot contribute to
1442 * an infinite symlink loop.
1443 *
1444 * On newer kernels the field has moved to the private nameidata structure
1445 * so we can't adjust it here. This may cause ELOOP when using a path with
1446 * 40 or more directories that are not already in the dentry cache.
1447 */
1448 #if defined(STRUCT_TASK_STRUCT_HAS_TOTAL_LINK_COUNT)
1449 current->total_link_count--;
1450 #endif
1452 target = canonical_dentry(path->dentry->d_inode);
1454 if (target == path->dentry) {
1455 dput(target);
1456 target = NULL;
1459 if (target) {
1460 dput(path->dentry);
1461 path->dentry = target;
1463 } else {
1464 spin_lock(&path->dentry->d_lock);
1465 path->dentry->d_flags &= ~DCACHE_NEED_AUTOMOUNT;
1466 spin_unlock(&path->dentry->d_lock);
1469 return NULL;
1471 #endif /* STRUCT_DENTRY_OPERATIONS_HAS_D_AUTOMOUNT */
1473 struct dentry_operations afs_dentry_operations = {
1474 .d_revalidate = afs_linux_dentry_revalidate,
1475 .d_delete = afs_dentry_delete,
1476 .d_iput = afs_dentry_iput,
1477 #ifdef STRUCT_DENTRY_OPERATIONS_HAS_D_AUTOMOUNT
1478 .d_automount = afs_dentry_automount,
1479 #endif /* STRUCT_DENTRY_OPERATIONS_HAS_D_AUTOMOUNT */
1482 /**********************************************************************
1483 * AFS Linux inode operations
1484 **********************************************************************/
1486 /* afs_linux_create
1488 * Merely need to set enough of vattr to get us through the create. Note
1489 * that the higher level code (open_namei) will take care of any truncation
1490 * explicitly. Exclusive open is also taken care of in open_namei.
1492 * name is in kernel space at this point.
1493 */
1494 static int
1495 #if defined(IOP_CREATE_TAKES_BOOL)
1496 afs_linux_create(struct inode *dip, struct dentry *dp, umode_t mode,
1497 bool excl)
1498 #elif defined(IOP_CREATE_TAKES_UMODE_T)
1499 afs_linux_create(struct inode *dip, struct dentry *dp, umode_t mode,
1500 struct nameidata *nd)
1501 #elif defined(IOP_CREATE_TAKES_NAMEIDATA)
1502 afs_linux_create(struct inode *dip, struct dentry *dp, int mode,
1503 struct nameidata *nd)
1504 #else
1505 afs_linux_create(struct inode *dip, struct dentry *dp, int mode)
1506 #endif
1508 struct vattr *vattr = NULL;
1509 cred_t *credp = crref();
1510 const char *name = dp->d_name.name;
1511 struct vcache *vcp;
1512 int code;
1514 AFS_GLOCK();
1516 code = afs_CreateAttr(&vattr);
1517 if (code) {
1518 goto out;
1520 vattr->va_mode = mode;
1521 vattr->va_type = mode & S_IFMT;
1523 code = afs_create(VTOAFS(dip), (char *)name, vattr, NONEXCL, mode,
1524 &vcp, credp);
1526 if (!code) {
1527 struct inode *ip = AFSTOV(vcp);
1529 afs_getattr(vcp, vattr, credp);
1530 afs_fill_inode(ip, vattr);
1531 insert_inode_hash(ip);
1532 #if !defined(STRUCT_SUPER_BLOCK_HAS_S_D_OP)
1533 dp->d_op = &afs_dentry_operations;
1534 #endif
1535 dp->d_time = parent_vcache_dv(dip, credp);
1536 d_instantiate(dp, ip);
1539 afs_DestroyAttr(vattr);
1541 out:
1542 AFS_GUNLOCK();
1544 crfree(credp);
1545 return afs_convert_code(code);
1548 /* afs_linux_lookup */
1549 static struct dentry *
1550 #if defined(IOP_LOOKUP_TAKES_UNSIGNED)
1551 afs_linux_lookup(struct inode *dip, struct dentry *dp,
1552 unsigned flags)
1553 #elif defined(IOP_LOOKUP_TAKES_NAMEIDATA)
1554 afs_linux_lookup(struct inode *dip, struct dentry *dp,
1555 struct nameidata *nd)
1556 #else
1557 afs_linux_lookup(struct inode *dip, struct dentry *dp)
1558 #endif
1560 cred_t *credp = crref();
1561 struct vcache *vcp = NULL;
1562 const char *comp = dp->d_name.name;
1563 struct inode *ip = NULL;
1564 struct dentry *newdp = NULL;
1565 int code;
1567 AFS_GLOCK();
1569 code = afs_lookup(VTOAFS(dip), (char *)comp, &vcp, credp);
1570 if (code == ENOENT) {
1571 /* It's ok for the file to not be found. That's noted by the caller by
1572 * seeing that the dp->d_inode field is NULL (set by d_splice_alias or
1573 * d_add, below). */
1574 code = 0;
1575 osi_Assert(vcp == NULL);
1577 if (code) {
1578 AFS_GUNLOCK();
1579 goto done;
1582 if (vcp) {
1583 struct vattr *vattr = NULL;
1584 struct vcache *parent_vc = VTOAFS(dip);
1586 if (parent_vc == vcp) {
1587 /* This is possible if the parent dir is a mountpoint to a volume,
1588 * and the dir entry we looked up is a mountpoint to the same
1589 * volume. Linux cannot cope with this, so return an error instead
1590 * of risking a deadlock or panic. */
1591 afs_PutVCache(vcp);
1592 code = EDEADLK;
1593 AFS_GUNLOCK();
1594 goto done;
1597 code = afs_CreateAttr(&vattr);
1598 if (code) {
1599 afs_PutVCache(vcp);
1600 AFS_GUNLOCK();
1601 goto done;
1604 ip = AFSTOV(vcp);
1605 afs_getattr(vcp, vattr, credp);
1606 afs_fill_inode(ip, vattr);
1607 if (hlist_unhashed(&ip->i_hash))
1608 insert_inode_hash(ip);
1610 afs_DestroyAttr(vattr);
1612 #if !defined(STRUCT_SUPER_BLOCK_HAS_S_D_OP)
1613 dp->d_op = &afs_dentry_operations;
1614 #endif
1615 dp->d_time = parent_vcache_dv(dip, credp);
1617 AFS_GUNLOCK();
1619 if (ip && S_ISDIR(ip->i_mode)) {
1620 d_prune_aliases(ip);
1622 #ifdef STRUCT_DENTRY_OPERATIONS_HAS_D_AUTOMOUNT
1623 /* Only needed if this is a volume root */
1624 if (vcp->mvstat == 2)
1625 ip->i_flags |= S_AUTOMOUNT;
1626 #endif
1628 /*
1629 * Take an extra reference so the inode doesn't go away if
1630 * d_splice_alias drops our reference on error.
1631 */
1632 if (ip)
1633 #ifdef HAVE_LINUX_IHOLD
1634 ihold(ip);
1635 #else
1636 igrab(ip);
1637 #endif
1639 newdp = d_splice_alias(ip, dp);
1641 done:
1642 crfree(credp);
1644 if (IS_ERR(newdp)) {
1645 /* d_splice_alias can return an error (EIO) if there is an existing
1646 * connected directory alias for this dentry. Add our dentry manually
1647 * ourselves if this happens. */
1648 d_add(dp, ip);
1650 #if defined(D_SPLICE_ALIAS_LEAK_ON_ERROR)
1651 /* Depending on the kernel version, d_splice_alias may or may not drop
1652 * the inode reference on error. If it didn't, do it here. */
1653 iput(ip);
1654 #endif
1655 return NULL;
1658 if (code) {
1659 if (ip)
1660 iput(ip);
1661 return ERR_PTR(afs_convert_code(code));
1664 iput(ip);
1665 return newdp;
1668 static int
1669 afs_linux_link(struct dentry *olddp, struct inode *dip, struct dentry *newdp)
1671 int code;
1672 cred_t *credp = crref();
1673 const char *name = newdp->d_name.name;
1674 struct inode *oldip = olddp->d_inode;
1676 /* If afs_link returned the vnode, we could instantiate the
1677 * dentry. Since it's not, we drop this one and do a new lookup.
1678 */
1679 d_drop(newdp);
1681 AFS_GLOCK();
1682 code = afs_link(VTOAFS(oldip), VTOAFS(dip), (char *)name, credp);
1684 AFS_GUNLOCK();
1685 crfree(credp);
1686 return afs_convert_code(code);
1689 /* We have to have a Linux specific sillyrename function, because we
1690 * also have to keep the dcache up to date when we're doing a silly
1691 * rename - so we don't want the generic vnodeops doing this behind our
1692 * back.
1693 */
1695 static int
1696 afs_linux_sillyrename(struct inode *dir, struct dentry *dentry,
1697 cred_t *credp)
1699 struct vcache *tvc = VTOAFS(dentry->d_inode);
1700 struct dentry *__dp = NULL;
1701 char *__name = NULL;
1702 int code;
1704 if (afs_linux_nfsfs_renamed(dentry))
1705 return EBUSY;
1707 do {
1708 dput(__dp);
1710 AFS_GLOCK();
1711 if (__name)
1712 osi_FreeSmallSpace(__name);
1713 __name = afs_newname();
1714 AFS_GUNLOCK();
1716 __dp = lookup_one_len(__name, dentry->d_parent, strlen(__name));
1718 if (IS_ERR(__dp)) {
1719 osi_FreeSmallSpace(__name);
1720 return EBUSY;
1722 } while (__dp->d_inode != NULL);
1724 AFS_GLOCK();
1725 code = afs_rename(VTOAFS(dir), (char *)dentry->d_name.name,
1726 VTOAFS(dir), (char *)__dp->d_name.name,
1727 credp);
1728 if (!code) {
1729 tvc->mvid.silly_name = __name;
1730 crhold(credp);
1731 if (tvc->uncred) {
1732 crfree(tvc->uncred);
1734 tvc->uncred = credp;
1735 tvc->f.states |= CUnlinked;
1736 afs_linux_set_nfsfs_renamed(dentry);
1738 __dp->d_time = 0; /* force to revalidate */
1739 d_move(dentry, __dp);
1740 } else {
1741 osi_FreeSmallSpace(__name);
1743 AFS_GUNLOCK();
1745 dput(__dp);
1747 return code;
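/* afs_linux_unlink
 * Remove a file. If the vnode still has other references and opens, do a
 * sillyrename so the data remains reachable until the last close; otherwise
 * call afs_remove() and drop the dentry.
 */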
1751 static int
1752 afs_linux_unlink(struct inode *dip, struct dentry *dp)
1754 int code = EBUSY;
1755 cred_t *credp = crref();
1756 const char *name = dp->d_name.name;
1757 struct vcache *tvc = VTOAFS(dp->d_inode);
1759 if (VREFCOUNT(tvc) > 1 && tvc->opens > 0
1760 && !(tvc->f.states & CUnlinked)) {
1762 code = afs_linux_sillyrename(dip, dp, credp);
1763 } else {
1764 AFS_GLOCK();
1765 code = afs_remove(VTOAFS(dip), (char *)name, credp);
1766 AFS_GUNLOCK();
1767 if (!code)
1768 d_drop(dp);
1771 crfree(credp);
1772 return afs_convert_code(code);
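/* afs_linux_symlink
 * Create a symlink via afs_symlink(). The dentry is dropped first and found
 * again by a later lookup, since afs_symlink does not return the vnode.
 */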
1776 static int
1777 afs_linux_symlink(struct inode *dip, struct dentry *dp, const char *target)
1779 int code;
1780 cred_t *credp = crref();
1781 struct vattr *vattr = NULL;
1782 const char *name = dp->d_name.name;
1784 /* If afs_symlink returned the vnode, we could instantiate the
1785 * dentry. Since it's not, we drop this one and do a new lookup.
1786 */
1787 d_drop(dp);
1789 AFS_GLOCK();
1790 code = afs_CreateAttr(&vattr);
1791 if (code) {
1792 goto out;
1795 code = afs_symlink(VTOAFS(dip), (char *)name, vattr, (char *)target, NULL,
1796 credp);
1797 afs_DestroyAttr(vattr);
1799 out:
1800 AFS_GUNLOCK();
1801 crfree(credp);
1802 return afs_convert_code(code);
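/* afs_linux_mkdir
 * Create a directory with afs_mkdir() and, if a vcache is returned,
 * instantiate the dentry with the new inode.
 */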
1805 static int
1806 #if defined(IOP_MKDIR_TAKES_UMODE_T)
1807 afs_linux_mkdir(struct inode *dip, struct dentry *dp, umode_t mode)
1808 #else
1809 afs_linux_mkdir(struct inode *dip, struct dentry *dp, int mode)
1810 #endif
1812 int code;
1813 cred_t *credp = crref();
1814 struct vcache *tvcp = NULL;
1815 struct vattr *vattr = NULL;
1816 const char *name = dp->d_name.name;
1818 AFS_GLOCK();
1819 code = afs_CreateAttr(&vattr);
1820 if (code) {
1821 goto out;
1824 vattr->va_mask = ATTR_MODE;
1825 vattr->va_mode = mode;
1827 code = afs_mkdir(VTOAFS(dip), (char *)name, vattr, &tvcp, credp);
1829 if (tvcp) {
1830 struct inode *ip = AFSTOV(tvcp);
1832 afs_getattr(tvcp, vattr, credp);
1833 afs_fill_inode(ip, vattr);
1835 #if !defined(STRUCT_SUPER_BLOCK_HAS_S_D_OP)
1836 dp->d_op = &afs_dentry_operations;
1837 #endif
1838 dp->d_time = parent_vcache_dv(dip, credp);
1839 d_instantiate(dp, ip);
1841 afs_DestroyAttr(vattr);
1843 out:
1844 AFS_GUNLOCK();
1846 crfree(credp);
1847 return afs_convert_code(code);
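/* afs_linux_rmdir
 * Remove a directory. AFS reports a non-empty directory as EEXIST, which is
 * remapped to the ENOTEMPTY that Linux expects.
 */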
1850 static int
1851 afs_linux_rmdir(struct inode *dip, struct dentry *dp)
1853 int code;
1854 cred_t *credp = crref();
1855 const char *name = dp->d_name.name;
1857 /* locking kernel conflicts with glock? */
1859 AFS_GLOCK();
1860 code = afs_rmdir(VTOAFS(dip), (char *)name, credp);
1861 AFS_GUNLOCK();
1863 /* Linux likes to see ENOTEMPTY returned from an rmdir() syscall
1864 * that failed because a directory is not empty. So, we map
1865 * EEXIST to ENOTEMPTY on linux.
1866 */
1867 if (code == EEXIST) {
1868 code = ENOTEMPTY;
1871 if (!code) {
1872 d_drop(dp);
1875 crfree(credp);
1876 return afs_convert_code(code);
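/* afs_linux_rename
 * Rename within AFS. The target dentry is unhashed for the duration of the
 * operation so no new references can appear, and rehashed afterwards.
 */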
1880 static int
1881 afs_linux_rename(struct inode *oldip, struct dentry *olddp,
1882 struct inode *newip, struct dentry *newdp
1883 #ifdef HAVE_LINUX_INODE_OPERATIONS_RENAME_TAKES_FLAGS
1884 , unsigned int flags
1885 #endif
1888 int code;
1889 cred_t *credp = crref();
1890 const char *oldname = olddp->d_name.name;
1891 const char *newname = newdp->d_name.name;
1892 struct dentry *rehash = NULL;
1894 #ifdef HAVE_LINUX_INODE_OPERATIONS_RENAME_TAKES_FLAGS
1895 if (flags)
1896 return -EINVAL; /* no support for new flags yet */
1897 #endif
1899 /* Prevent any new references during rename operation. */
1901 if (!d_unhashed(newdp)) {
1902 d_drop(newdp);
1903 rehash = newdp;
1906 afs_maybe_shrink_dcache(olddp);
1908 AFS_GLOCK();
1909 code = afs_rename(VTOAFS(oldip), (char *)oldname, VTOAFS(newip), (char *)newname, credp);
1910 AFS_GUNLOCK();
1912 if (!code)
1913 olddp->d_time = 0; /* force to revalidate */
1915 if (rehash)
1916 d_rehash(rehash);
1918 crfree(credp);
1919 return afs_convert_code(code);
1923 /* afs_linux_ireadlink
1924 * Internal readlink which can return link contents to user or kernel space.
1925 * Note that the buffer is NOT supposed to be null-terminated.
1926 */
1927 static int
1928 afs_linux_ireadlink(struct inode *ip, char *target, int maxlen, uio_seg_t seg)
1930 int code;
1931 cred_t *credp = crref();
1932 struct uio tuio;
1933 struct iovec iov;
1935 memset(&tuio, 0, sizeof(tuio));
1936 memset(&iov, 0, sizeof(iov));
1938 setup_uio(&tuio, &iov, target, (afs_offs_t) 0, maxlen, UIO_READ, seg);
1939 code = afs_readlink(VTOAFS(ip), &tuio, credp);
1940 crfree(credp);
1942 if (!code)
1943 return maxlen - tuio.uio_resid;
1944 else
1945 return afs_convert_code(code);
1948 #if !defined(USABLE_KERNEL_PAGE_SYMLINK_CACHE)
1949 /* afs_linux_readlink
1950 * Fill target (which is in user space) with contents of symlink.
1951 */
1952 static int
1953 afs_linux_readlink(struct dentry *dp, char *target, int maxlen)
1955 int code;
1956 struct inode *ip = dp->d_inode;
1958 AFS_GLOCK();
1959 code = afs_linux_ireadlink(ip, target, maxlen, AFS_UIOUSER);
1960 AFS_GUNLOCK();
1961 return code;
1965 /* afs_linux_follow_link
1966 * a file system dependent link following routine.
1967 */
1968 #if defined(HAVE_LINUX_INODE_OPERATIONS_FOLLOW_LINK_NO_NAMEIDATA)
1969 static const char *afs_linux_follow_link(struct dentry *dentry, void **link_data)
1970 #else
1971 static int afs_linux_follow_link(struct dentry *dentry, struct nameidata *nd)
1972 #endif
1974 int code;
1975 char *name;
1977 name = kmalloc(PATH_MAX, GFP_NOFS);
1978 if (!name) {
1979 #if defined(HAVE_LINUX_INODE_OPERATIONS_FOLLOW_LINK_NO_NAMEIDATA)
1980 return ERR_PTR(-EIO);
1981 #else
1982 return -EIO;
1983 #endif
1986 AFS_GLOCK();
1987 code = afs_linux_ireadlink(dentry->d_inode, name, PATH_MAX - 1, AFS_UIOSYS);
1988 AFS_GUNLOCK();
1990 if (code < 0) {
1991 #if defined(HAVE_LINUX_INODE_OPERATIONS_FOLLOW_LINK_NO_NAMEIDATA)
1992 return ERR_PTR(code);
1993 #else
1994 return code;
1995 #endif
1998 name[code] = '\0';
1999 #if defined(HAVE_LINUX_INODE_OPERATIONS_FOLLOW_LINK_NO_NAMEIDATA)
2000 return *link_data = name;
2001 #else
2002 nd_set_link(nd, name);
2003 return 0;
2004 #endif
2007 #if defined(HAVE_LINUX_INODE_OPERATIONS_PUT_LINK_NO_NAMEIDATA)
2008 static void
2009 afs_linux_put_link(struct inode *inode, void *link_data)
2011 char *name = link_data;
2013 if (name && !IS_ERR(name))
2014 kfree(name);
2016 #else
2017 static void
2018 afs_linux_put_link(struct dentry *dentry, struct nameidata *nd)
2020 char *name = nd_get_link(nd);
2022 if (name && !IS_ERR(name))
2023 kfree(name);
2025 #endif /* HAVE_LINUX_INODE_OPERATIONS_PUT_LINK_NO_NAMEIDATA */
2027 #endif /* USABLE_KERNEL_PAGE_SYMLINK_CACHE */
2029 /* Populate a page by filling it from the cache file pointed at by cachefp
2030 * (which contains indicated chunk)
2031 * If task is NULL, the page copy occurs synchronously, and the routine
2032 * returns with page still locked. If task is non-NULL, then page copies
2033 * may occur in the background, and the page will be unlocked when it is
2034 * ready for use.
2035 */
2036 static int
2037 afs_linux_read_cache(struct file *cachefp, struct page *page,
2038 int chunk, struct pagevec *lrupv,
2039 struct afs_pagecopy_task *task) {
2040 loff_t offset = page_offset(page);
2041 struct inode *cacheinode = cachefp->f_dentry->d_inode;
2042 struct page *newpage, *cachepage;
2043 struct address_space *cachemapping;
2044 int pageindex;
2045 int code = 0;
2047 cachemapping = cacheinode->i_mapping;
2048 newpage = NULL;
2049 cachepage = NULL;
2051 /* If we're trying to read a page that's past the end of the disk
2052 * cache file, then just return a zeroed page */
2053 if (AFS_CHUNKOFFSET(offset) >= i_size_read(cacheinode)) {
2054 zero_user_segment(page, 0, PAGE_SIZE);
2055 SetPageUptodate(page);
2056 if (task)
2057 unlock_page(page);
2058 return 0;
2061 /* From our offset, we now need to work out which page in the disk
2062 * file it corresponds to. This will be fun ... */
2063 pageindex = (offset - AFS_CHUNKTOBASE(chunk)) >> PAGE_SHIFT;
2065 while (cachepage == NULL) {
2066 cachepage = find_get_page(cachemapping, pageindex);
2067 if (!cachepage) {
2068 if (!newpage)
2069 newpage = page_cache_alloc(cachemapping);
2070 if (!newpage) {
2071 code = -ENOMEM;
2072 goto out;
2075 code = add_to_page_cache(newpage, cachemapping,
2076 pageindex, GFP_KERNEL);
2077 if (code == 0) {
2078 cachepage = newpage;
2079 newpage = NULL;
2081 get_page(cachepage);
2082 if (!pagevec_add(lrupv, cachepage))
2083 __pagevec_lru_add_file(lrupv);
2085 } else {
2086 put_page(newpage);
2087 newpage = NULL;
2088 if (code != -EEXIST)
2089 goto out;
2091 } else {
2092 lock_page(cachepage);
2096 if (!PageUptodate(cachepage)) {
2097 ClearPageError(cachepage);
2098 code = cachemapping->a_ops->readpage(NULL, cachepage);
2099 if (!code && !task) {
2100 wait_on_page_locked(cachepage);
2102 } else {
2103 unlock_page(cachepage);
2106 if (!code) {
2107 if (PageUptodate(cachepage)) {
2108 copy_highpage(page, cachepage);
2109 flush_dcache_page(page);
2110 SetPageUptodate(page);
2112 if (task)
2113 unlock_page(page);
2114 } else if (task) {
2115 afs_pagecopy_queue_page(task, cachepage, page);
2116 } else {
2117 code = -EIO;
2121 if (code && task) {
2122 unlock_page(page);
2125 out:
2126 if (cachepage)
2127 put_page(cachepage);
2129 return code;
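/* Fast path for readpage when a UFS disk cache is in use: validate (or look
 * up) the dcache entry covering this offset, open the backing cache file, and
 * copy the page straight out of the cache file's page cache via
 * afs_linux_read_cache(). Returns 1 (and sets *codep) if the page was handled
 * here, or 0 if the caller must fall back to the slow path. */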
2132 static inline int
2133 afs_linux_readpage_fastpath(struct file *fp, struct page *pp, int *codep)
2135 loff_t offset = page_offset(pp);
2136 struct inode *ip = FILE_INODE(fp);
2137 struct vcache *avc = VTOAFS(ip);
2138 struct dcache *tdc;
2139 struct file *cacheFp = NULL;
2140 int code;
2141 int dcLocked = 0;
2142 struct pagevec lrupv;
2144 /* Not a UFS cache, don't do anything */
2145 if (cacheDiskType != AFS_FCACHE_TYPE_UFS)
2146 return 0;
2148 /* No readpage (e.g. tmpfs), skip */
2149 if (cachefs_noreadpage)
2150 return 0;
2152 /* Can't do anything if the vcache isn't statd, or if the read
2153 * crosses a chunk boundary.
2155 if (!(avc->f.states & CStatd) ||
2156 AFS_CHUNK(offset) != AFS_CHUNK(offset + PAGE_SIZE)) {
2157 return 0;
2160 ObtainWriteLock(&avc->lock, 911);
2162 /* XXX - See if hinting actually makes things faster !!! */
2164 /* See if we have a suitable entry already cached */
2165 tdc = avc->dchint;
2167 if (tdc) {
2168 /* We need to lock xdcache, then dcache, to handle situations where
2169 * the hint is on the free list. However, we can't safely do this
2170 * according to the locking hierarchy. So, use a non-blocking lock.
2172 ObtainReadLock(&afs_xdcache);
2173 dcLocked = ( 0 == NBObtainReadLock(&tdc->lock));
2175 if (dcLocked && (tdc->index != NULLIDX)
2176 && !FidCmp(&tdc->f.fid, &avc->f.fid)
2177 && tdc->f.chunk == AFS_CHUNK(offset)
2178 && !(afs_indexFlags[tdc->index] & (IFFree | IFDiscarded))) {
2179 /* Bonus - the hint was correct */
2180 afs_RefDCache(tdc);
2181 } else {
2182 /* Only destroy the hint if it's actually invalid, not if there's
2183 * just been a locking failure */
2184 if (dcLocked) {
2185 ReleaseReadLock(&tdc->lock);
2186 avc->dchint = NULL;
2189 tdc = NULL;
2190 dcLocked = 0;
2192 ReleaseReadLock(&afs_xdcache);
2195 /* No hint, or hint is no longer valid - see if we can get something
2196 * directly from the dcache
2198 if (!tdc)
2199 tdc = afs_FindDCache(avc, offset);
2201 if (!tdc) {
2202 ReleaseWriteLock(&avc->lock);
2203 return 0;
2206 if (!dcLocked)
2207 ObtainReadLock(&tdc->lock);
2209 /* Is the dcache we've been given currently up to date? */
2210 if (!hsame(avc->f.m.DataVersion, tdc->f.versionNo) ||
2211 (tdc->dflags & DFFetching))
2212 goto out;
2214 /* Update our hint for future abuse */
2215 avc->dchint = tdc;
2217 /* Okay, so we've now got a cache file that is up to date */
2219 /* XXX - I suspect we should be locking the inodes before we use them! */
2220 AFS_GUNLOCK();
2221 cacheFp = afs_linux_raw_open(&tdc->f.inode);
2222 if (!cacheFp->f_dentry->d_inode->i_mapping->a_ops->readpage) {
2223 cachefs_noreadpage = 1;
2224 AFS_GLOCK();
2225 goto out;
2227 #if defined(PAGEVEC_INIT_COLD_ARG)
2228 pagevec_init(&lrupv, 0);
2229 #else
2230 pagevec_init(&lrupv);
2231 #endif
2233 code = afs_linux_read_cache(cacheFp, pp, tdc->f.chunk, &lrupv, NULL);
2235 if (pagevec_count(&lrupv))
2236 __pagevec_lru_add_file(&lrupv);
2238 filp_close(cacheFp, NULL);
2239 AFS_GLOCK();
2241 ReleaseReadLock(&tdc->lock);
2242 ReleaseWriteLock(&avc->lock);
2243 afs_PutDCache(tdc);
2245 *codep = code;
2246 return 1;
2248 out:
2249 ReleaseWriteLock(&avc->lock);
2250 ReleaseReadLock(&tdc->lock);
2251 afs_PutDCache(tdc);
2252 return 0;
2255 /* afs_linux_readpage
2257 * This function is split into two, because prepare_write/begin_write
2258 * require a readpage call which doesn't unlock the resulting page upon
2259 * success.
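/* Fill a single page by reading PAGE_SIZE bytes at the page's offset from the
 * AFS file with afs_rdwr(), zero-filling whatever the read did not cover and
 * then marking the page up to date. The page is left locked for the caller. */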
2261 static int
2262 afs_linux_fillpage(struct file *fp, struct page *pp)
2264 afs_int32 code;
2265 char *address;
2266 struct uio *auio;
2267 struct iovec *iovecp;
2268 struct inode *ip = FILE_INODE(fp);
2269 afs_int32 cnt = page_count(pp);
2270 struct vcache *avc = VTOAFS(ip);
2271 afs_offs_t offset = page_offset(pp);
2272 cred_t *credp;
2274 AFS_GLOCK();
2275 if (afs_linux_readpage_fastpath(fp, pp, &code)) {
2276 AFS_GUNLOCK();
2277 return code;
2279 AFS_GUNLOCK();
2281 credp = crref();
2282 address = kmap(pp);
2283 ClearPageError(pp);
2285 auio = kmalloc(sizeof(struct uio), GFP_NOFS);
2286 iovecp = kmalloc(sizeof(struct iovec), GFP_NOFS);
2288 setup_uio(auio, iovecp, (char *)address, offset, PAGE_SIZE, UIO_READ,
2289 AFS_UIOSYS);
2291 AFS_GLOCK();
2292 AFS_DISCON_LOCK();
2293 afs_Trace4(afs_iclSetp, CM_TRACE_READPAGE, ICL_TYPE_POINTER, ip,
2294 ICL_TYPE_POINTER, pp, ICL_TYPE_INT32, cnt, ICL_TYPE_INT32,
2295 99999); /* not a possible code value */
2297 code = afs_rdwr(avc, auio, UIO_READ, 0, credp);
2299 afs_Trace4(afs_iclSetp, CM_TRACE_READPAGE, ICL_TYPE_POINTER, ip,
2300 ICL_TYPE_POINTER, pp, ICL_TYPE_INT32, cnt, ICL_TYPE_INT32,
2301 code);
2302 AFS_DISCON_UNLOCK();
2303 AFS_GUNLOCK();
2304 if (!code) {
2305 /* XXX valid for no-cache also? Check last bits of files... :)
2306 * Cognate code goes in afs_NoCacheFetchProc. */
2307 if (auio->uio_resid) /* zero remainder of page */
2308 memset((void *)(address + (PAGE_SIZE - auio->uio_resid)), 0,
2309 auio->uio_resid);
2311 flush_dcache_page(pp);
2312 SetPageUptodate(pp);
2313 } /* !code */
2315 kunmap(pp);
2317 kfree(auio);
2318 kfree(iovecp);
2320 crfree(credp);
2321 return afs_convert_code(code);
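/* If this page starts a chunk, ask the cache manager to begin prefetching the
 * following chunk (afs_PrefetchChunk) so sequential readers stay ahead of the
 * fileserver. A non-blocking lock is used so readpage never stalls just to
 * schedule a prefetch. */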
2324 static int
2325 afs_linux_prefetch(struct file *fp, struct page *pp)
2327 int code = 0;
2328 struct vcache *avc = VTOAFS(FILE_INODE(fp));
2329 afs_offs_t offset = page_offset(pp);
2331 if (AFS_CHUNKOFFSET(offset) == 0) {
2332 struct dcache *tdc;
2333 struct vrequest *treq = NULL;
2334 cred_t *credp;
2336 credp = crref();
2337 AFS_GLOCK();
2338 code = afs_CreateReq(&treq, credp);
2339 if (!code && !NBObtainWriteLock(&avc->lock, 534)) {
2340 tdc = afs_FindDCache(avc, offset);
2341 if (tdc) {
2342 if (!(tdc->mflags & DFNextStarted))
2343 afs_PrefetchChunk(avc, tdc, credp, treq);
2344 afs_PutDCache(tdc);
2346 ReleaseWriteLock(&avc->lock);
2348 afs_DestroyReq(treq);
2349 AFS_GUNLOCK();
2350 crfree(credp);
2352 return afs_convert_code(code);
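/* Cache-bypass variant of readpages: build one uio covering the requested
 * pages, insert the pages into the page cache and LRU, and hand the whole
 * request to afs_ReadNoCache(). The background thread fills, unlocks and
 * releases the pages as data arrives from the fileserver, and also frees the
 * iovec array, uio and request structure. */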
2356 static int
2357 afs_linux_bypass_readpages(struct file *fp, struct address_space *mapping,
2358 struct list_head *page_list, unsigned num_pages)
2360 afs_int32 page_ix;
2361 struct uio *auio;
2362 afs_offs_t offset;
2363 struct iovec* iovecp;
2364 struct nocache_read_request *ancr;
2365 struct page *pp;
2366 struct pagevec lrupv;
2367 afs_int32 code = 0;
2369 cred_t *credp;
2370 struct inode *ip = FILE_INODE(fp);
2371 struct vcache *avc = VTOAFS(ip);
2372 afs_int32 base_index = 0;
2373 afs_int32 page_count = 0;
2374 afs_int32 isize;
2376 /* background thread must free: iovecp, auio, ancr */
2377 iovecp = osi_Alloc(num_pages * sizeof(struct iovec));
2379 auio = osi_Alloc(sizeof(struct uio));
2380 auio->uio_iov = iovecp;
2381 auio->uio_iovcnt = num_pages;
2382 auio->uio_flag = UIO_READ;
2383 auio->uio_seg = AFS_UIOSYS;
2384 auio->uio_resid = num_pages * PAGE_SIZE;
2386 ancr = osi_Alloc(sizeof(struct nocache_read_request));
2387 ancr->auio = auio;
2388 ancr->offset = auio->uio_offset;
2389 ancr->length = auio->uio_resid;
2391 #if defined(PAGEVEC_INIT_COLD_ARG)
2392 pagevec_init(&lrupv, 0);
2393 #else
2394 pagevec_init(&lrupv);
2395 #endif
2397 for(page_ix = 0; page_ix < num_pages; ++page_ix) {
2399 if(list_empty(page_list))
2400 break;
2402 pp = list_entry(page_list->prev, struct page, lru);
2403 /* If we allocate a page and don't remove it from page_list,
2404 * the page cache gets upset. */
2405 list_del(&pp->lru);
2406 isize = (i_size_read(fp->f_mapping->host) - 1) >> PAGE_SHIFT;
2407 if(pp->index > isize) {
2408 if(PageLocked(pp))
2409 unlock_page(pp);
2410 continue;
2413 if(page_ix == 0) {
2414 offset = page_offset(pp);
2415 ancr->offset = auio->uio_offset = offset;
2416 base_index = pp->index;
2418 iovecp[page_ix].iov_len = PAGE_SIZE;
2419 code = add_to_page_cache(pp, mapping, pp->index, GFP_KERNEL);
2420 if(base_index != pp->index) {
2421 if(PageLocked(pp))
2422 unlock_page(pp);
2423 put_page(pp);
2424 iovecp[page_ix].iov_base = (void *) 0;
2425 base_index++;
2426 ancr->length -= PAGE_SIZE;
2427 continue;
2429 base_index++;
2430 if(code) {
2431 if(PageLocked(pp))
2432 unlock_page(pp);
2433 put_page(pp);
2434 iovecp[page_ix].iov_base = (void *) 0;
2435 } else {
2436 page_count++;
2437 if(!PageLocked(pp)) {
2438 lock_page(pp);
2441 /* increment page refcount--our original design assumed
2442 * that locking it would effectively pin it; protect
2443 * ourselves from the possibility that this assumption is
2444 * faulty, at low cost (provided we do not fail to
2445 * do the corresponding decref on the other side) */
2446 get_page(pp);
2448 /* save the page for background map */
2449 iovecp[page_ix].iov_base = (void*) pp;
2451 /* and put it on the LRU cache */
2452 if (!pagevec_add(&lrupv, pp))
2453 __pagevec_lru_add_file(&lrupv);
2457 /* If there were useful pages in the page list, make sure all pages
2458 * are in the LRU cache, then schedule the read */
2459 if(page_count) {
2460 if (pagevec_count(&lrupv))
2461 __pagevec_lru_add_file(&lrupv);
2462 credp = crref();
2463 code = afs_ReadNoCache(avc, ancr, credp);
2464 crfree(credp);
2465 } else {
2466 /* If there is nothing for the background thread to handle,
2467 * it won't be freeing the things that we never gave it */
2468 osi_Free(iovecp, num_pages * sizeof(struct iovec));
2469 osi_Free(auio, sizeof(struct uio));
2470 osi_Free(ancr, sizeof(struct nocache_read_request));
2472 /* we do not flush, release, or unmap pages--that will be
2473 * done for us by the background thread as each page comes in
2474 * from the fileserver */
2475 return afs_convert_code(code);
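/* Cache-bypass variant of readpage: pages at or beyond EOF are zeroed and
 * completed immediately; otherwise a single-page afs_ReadNoCache() request is
 * queued, and the receiver unlocks and releases the page once the data has
 * been copied in. */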
2479 static int
2480 afs_linux_bypass_readpage(struct file *fp, struct page *pp)
2482 cred_t *credp = NULL;
2483 struct uio *auio;
2484 struct iovec *iovecp;
2485 struct nocache_read_request *ancr;
2486 int code;
2489 * Special case: if page is at or past end of file, just zero it and set
2490 * it as up to date.
2492 if (page_offset(pp) >= i_size_read(fp->f_mapping->host)) {
2493 zero_user_segment(pp, 0, PAGE_SIZE);
2494 SetPageUptodate(pp);
2495 unlock_page(pp);
2496 return 0;
2499 ClearPageError(pp);
2501 /* receiver frees */
2502 auio = osi_Alloc(sizeof(struct uio));
2503 iovecp = osi_Alloc(sizeof(struct iovec));
2505 /* address can be NULL, because we overwrite it with 'pp', below */
2506 setup_uio(auio, iovecp, NULL, page_offset(pp),
2507 PAGE_SIZE, UIO_READ, AFS_UIOSYS);
2509 /* save the page for background map */
2510 get_page(pp); /* see above */
2511 auio->uio_iov->iov_base = (void*) pp;
2512 /* the background thread will free this */
2513 ancr = osi_Alloc(sizeof(struct nocache_read_request));
2514 ancr->auio = auio;
2515 ancr->offset = page_offset(pp);
2516 ancr->length = PAGE_SIZE;
2518 credp = crref();
2519 code = afs_ReadNoCache(VTOAFS(FILE_INODE(fp)), ancr, credp);
2520 crfree(credp);
2522 return afs_convert_code(code);
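/* Decide whether reads on this inode may bypass the disk cache, according to
 * the configured cache_bypass_strategy; with LARGE_FILES_BYPASS_CACHE only
 * files larger than cache_bypass_threshold qualify. */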
2525 static inline int
2526 afs_linux_can_bypass(struct inode *ip) {
2528 switch(cache_bypass_strategy) {
2529 case NEVER_BYPASS_CACHE:
2530 return 0;
2531 case ALWAYS_BYPASS_CACHE:
2532 return 1;
2533 case LARGE_FILES_BYPASS_CACHE:
2534 if (i_size_read(ip) > cache_bypass_threshold)
2535 return 1;
2536 default:
2537 return 0;
2541 /* Check if a file is permitted to bypass the cache by policy, and modify
2542 * the cache bypass state recorded for that file */
2544 static inline int
2545 afs_linux_bypass_check(struct inode *ip) {
2546 cred_t* credp;
2548 int bypass = afs_linux_can_bypass(ip);
2550 credp = crref();
2551 trydo_cache_transition(VTOAFS(ip), credp, bypass);
2552 crfree(credp);
2554 return bypass;
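/* The readpage address space operation. Either hands the page to the
 * cache-bypass path, or fills it from the cache/fileserver and kicks off a
 * prefetch; in the cached case the page is unlocked here once it is done. */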
2558 static int
2559 afs_linux_readpage(struct file *fp, struct page *pp)
2561 int code;
2563 if (afs_linux_bypass_check(FILE_INODE(fp))) {
2564 code = afs_linux_bypass_readpage(fp, pp);
2565 } else {
2566 code = afs_linux_fillpage(fp, pp);
2567 if (!code)
2568 code = afs_linux_prefetch(fp, pp);
2569 unlock_page(pp);
2572 return code;
2575 /* Readpages reads a number of pages for a particular file. We use
2576 * this to optimise the reading, by limiting the number of times we have
2577 * to look up, lock and open vcaches and dcaches.
2580 static int
2581 afs_linux_readpages(struct file *fp, struct address_space *mapping,
2582 struct list_head *page_list, unsigned int num_pages)
2584 struct inode *inode = mapping->host;
2585 struct vcache *avc = VTOAFS(inode);
2586 struct dcache *tdc;
2587 struct file *cacheFp = NULL;
2588 int code;
2589 unsigned int page_idx;
2590 loff_t offset;
2591 struct pagevec lrupv;
2592 struct afs_pagecopy_task *task;
2594 if (afs_linux_bypass_check(inode))
2595 return afs_linux_bypass_readpages(fp, mapping, page_list, num_pages);
2597 if (cacheDiskType == AFS_FCACHE_TYPE_MEM)
2598 return 0;
2600 /* No readpage (e.g. tmpfs), skip */
2601 if (cachefs_noreadpage)
2602 return 0;
2604 AFS_GLOCK();
2605 if ((code = afs_linux_VerifyVCache(avc, NULL))) {
2606 AFS_GUNLOCK();
2607 return code;
2610 ObtainWriteLock(&avc->lock, 912);
2611 AFS_GUNLOCK();
2613 task = afs_pagecopy_init_task();
2615 tdc = NULL;
2616 #if defined(PAGEVEC_INIT_COLD_ARG)
2617 pagevec_init(&lrupv, 0);
2618 #else
2619 pagevec_init(&lrupv);
2620 #endif
2621 for (page_idx = 0; page_idx < num_pages; page_idx++) {
2622 struct page *page = list_entry(page_list->prev, struct page, lru);
2623 list_del(&page->lru);
2624 offset = page_offset(page);
2626 if (tdc && tdc->f.chunk != AFS_CHUNK(offset)) {
2627 AFS_GLOCK();
2628 ReleaseReadLock(&tdc->lock);
2629 afs_PutDCache(tdc);
2630 AFS_GUNLOCK();
2631 tdc = NULL;
2632 if (cacheFp)
2633 filp_close(cacheFp, NULL);
2636 if (!tdc) {
2637 AFS_GLOCK();
2638 if ((tdc = afs_FindDCache(avc, offset))) {
2639 ObtainReadLock(&tdc->lock);
2640 if (!hsame(avc->f.m.DataVersion, tdc->f.versionNo) ||
2641 (tdc->dflags & DFFetching)) {
2642 ReleaseReadLock(&tdc->lock);
2643 afs_PutDCache(tdc);
2644 tdc = NULL;
2647 AFS_GUNLOCK();
2648 if (tdc) {
2649 cacheFp = afs_linux_raw_open(&tdc->f.inode);
2650 if (!cacheFp->f_dentry->d_inode->i_mapping->a_ops->readpage) {
2651 cachefs_noreadpage = 1;
2652 goto out;
2657 if (tdc && !add_to_page_cache(page, mapping, page->index,
2658 GFP_KERNEL)) {
2659 get_page(page);
2660 if (!pagevec_add(&lrupv, page))
2661 __pagevec_lru_add_file(&lrupv);
2663 afs_linux_read_cache(cacheFp, page, tdc->f.chunk, &lrupv, task);
2665 put_page(page);
2667 if (pagevec_count(&lrupv))
2668 __pagevec_lru_add_file(&lrupv);
2670 out:
2671 if (tdc)
2672 filp_close(cacheFp, NULL);
2674 afs_pagecopy_put_task(task);
2676 AFS_GLOCK();
2677 if (tdc) {
2678 ReleaseReadLock(&tdc->lock);
2679 afs_PutDCache(tdc);
2682 ReleaseWriteLock(&avc->lock);
2683 AFS_GUNLOCK();
2684 return 0;
2687 /* Prepare an AFS vcache for writeback. Should be called with the vcache
2688 * locked */
2689 static inline int
2690 afs_linux_prepare_writeback(struct vcache *avc) {
2691 pid_t pid;
2692 struct pagewriter *pw;
2694 pid = MyPidxx2Pid(MyPidxx);
2695 /* Prevent recursion into the writeback code */
2696 spin_lock(&avc->pagewriter_lock);
2697 list_for_each_entry(pw, &avc->pagewriters, link) {
2698 if (pw->writer == pid) {
2699 spin_unlock(&avc->pagewriter_lock);
2700 return AOP_WRITEPAGE_ACTIVATE;
2703 spin_unlock(&avc->pagewriter_lock);
2705 /* Add ourselves to writer list */
2706 pw = osi_Alloc(sizeof(struct pagewriter));
2707 pw->writer = pid;
2708 spin_lock(&avc->pagewriter_lock);
2709 list_add_tail(&pw->link, &avc->pagewriters);
2710 spin_unlock(&avc->pagewriter_lock);
2712 return 0;
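/* Flush some of the vcache's dirty data back to the fileserver via
 * afs_DoPartialWrite(), using a temporary vrequest built from the supplied
 * credentials. Called with the vcache write-locked. */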
2715 static inline int
2716 afs_linux_dopartialwrite(struct vcache *avc, cred_t *credp) {
2717 struct vrequest *treq = NULL;
2718 int code = 0;
2720 if (!afs_CreateReq(&treq, credp)) {
2721 code = afs_DoPartialWrite(avc, treq);
2722 afs_DestroyReq(treq);
2725 return afs_convert_code(code);
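/* Counterpart to afs_linux_prepare_writeback(): remove this process from the
 * vcache's pagewriter list. Freeing is deferred until the spinlock has been
 * dropped, because osi_Free may sleep. */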
2728 static inline void
2729 afs_linux_complete_writeback(struct vcache *avc) {
2730 struct pagewriter *pw, *store;
2731 pid_t pid;
2732 struct list_head tofree;
2734 INIT_LIST_HEAD(&tofree);
2735 pid = MyPidxx2Pid(MyPidxx);
2736 /* Remove ourselves from writer list */
2737 spin_lock(&avc->pagewriter_lock);
2738 list_for_each_entry_safe(pw, store, &avc->pagewriters, link) {
2739 if (pw->writer == pid) {
2740 list_del(&pw->link);
2741 /* osi_Free may sleep so we need to defer it */
2742 list_add_tail(&pw->link, &tofree);
2745 spin_unlock(&avc->pagewriter_lock);
2746 list_for_each_entry_safe(pw, store, &tofree, link) {
2747 list_del(&pw->link);
2748 osi_Free(pw, sizeof(struct pagewriter));
2752 /* Write back a given page synchronously. Called with no AFS locks held */
2753 static int
2754 afs_linux_page_writeback(struct inode *ip, struct page *pp,
2755 unsigned long offset, unsigned int count,
2756 cred_t *credp)
2758 struct vcache *vcp = VTOAFS(ip);
2759 char *buffer;
2760 afs_offs_t base;
2761 int code = 0;
2762 struct uio tuio;
2763 struct iovec iovec;
2764 int f_flags = 0;
2766 memset(&tuio, 0, sizeof(tuio));
2767 memset(&iovec, 0, sizeof(iovec));
2769 buffer = kmap(pp) + offset;
2770 base = page_offset(pp) + offset;
2772 AFS_GLOCK();
2773 afs_Trace4(afs_iclSetp, CM_TRACE_UPDATEPAGE, ICL_TYPE_POINTER, vcp,
2774 ICL_TYPE_POINTER, pp, ICL_TYPE_INT32, page_count(pp),
2775 ICL_TYPE_INT32, 99999);
2777 setup_uio(&tuio, &iovec, buffer, base, count, UIO_WRITE, AFS_UIOSYS);
2779 code = afs_write(vcp, &tuio, f_flags, credp, 0);
2781 i_size_write(ip, vcp->f.m.Length);
2782 ip->i_blocks = ((vcp->f.m.Length + 1023) >> 10) << 1;
2784 code = code ? afs_convert_code(code) : count - tuio.uio_resid;
2786 afs_Trace4(afs_iclSetp, CM_TRACE_UPDATEPAGE, ICL_TYPE_POINTER, vcp,
2787 ICL_TYPE_POINTER, pp, ICL_TYPE_INT32, page_count(pp),
2788 ICL_TYPE_INT32, code);
2790 AFS_GUNLOCK();
2791 kunmap(pp);
2793 return code;
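/* Synchronous page write, used by commit_write/write_end: register as a page
 * writer, push the byte range to the fileserver, then trigger a partial store
 * if the write succeeded. Returns the number of bytes written or a negative
 * errno. */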
2796 static int
2797 afs_linux_writepage_sync(struct inode *ip, struct page *pp,
2798 unsigned long offset, unsigned int count)
2800 int code;
2801 int code1 = 0;
2802 struct vcache *vcp = VTOAFS(ip);
2803 cred_t *credp;
2805 /* Catch recursive writeback. This occurs if the kernel decides
2806 * writeback is required whilst we are writing to the cache, or
2807 * flushing to the server. When we're running synchronously (as
2808 * opposed to from writepage) we can't actually do anything about
2809 * this case - as we can't return AOP_WRITEPAGE_ACTIVATE to write()
2811 AFS_GLOCK();
2812 ObtainWriteLock(&vcp->lock, 532);
2813 afs_linux_prepare_writeback(vcp);
2814 ReleaseWriteLock(&vcp->lock);
2815 AFS_GUNLOCK();
2817 credp = crref();
2818 code = afs_linux_page_writeback(ip, pp, offset, count, credp);
2820 AFS_GLOCK();
2821 ObtainWriteLock(&vcp->lock, 533);
2822 if (code > 0)
2823 code1 = afs_linux_dopartialwrite(vcp, credp);
2824 afs_linux_complete_writeback(vcp);
2825 ReleaseWriteLock(&vcp->lock);
2826 AFS_GUNLOCK();
2827 crfree(credp);
2829 if (code1)
2830 return code1;
2832 return code;
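/* The writepage address space operation. Guards against pages beyond a
 * truncation point and against recursive writeback (returning
 * AOP_WRITEPAGE_ACTIVATE with the page still locked), writes the page using
 * the credentials cached on the vcache, and trims the length for the final
 * partial page of the file. */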
2835 static int
2836 #ifdef AOP_WRITEPAGE_TAKES_WRITEBACK_CONTROL
2837 afs_linux_writepage(struct page *pp, struct writeback_control *wbc)
2838 #else
2839 afs_linux_writepage(struct page *pp)
2840 #endif
2842 struct address_space *mapping = pp->mapping;
2843 struct inode *inode;
2844 struct vcache *vcp;
2845 cred_t *credp;
2846 unsigned int to = PAGE_SIZE;
2847 loff_t isize;
2848 int code = 0;
2849 int code1 = 0;
2851 get_page(pp);
2853 inode = mapping->host;
2854 vcp = VTOAFS(inode);
2855 isize = i_size_read(inode);
2857 /* Don't defeat an earlier truncate */
2858 if (page_offset(pp) > isize) {
2859 set_page_writeback(pp);
2860 unlock_page(pp);
2861 goto done;
2864 AFS_GLOCK();
2865 ObtainWriteLock(&vcp->lock, 537);
2866 code = afs_linux_prepare_writeback(vcp);
2867 if (code == AOP_WRITEPAGE_ACTIVATE) {
2868 /* WRITEPAGE_ACTIVATE is the only return value that permits us
2869 * to return with the page still locked */
2870 ReleaseWriteLock(&vcp->lock);
2871 AFS_GUNLOCK();
2872 return code;
2875 /* Grab the creds structure currently held in the vnode, and
2876 * get a reference to it, in case it goes away ... */
2877 credp = vcp->cred;
2878 if (credp)
2879 crhold(credp);
2880 else
2881 credp = crref();
2882 ReleaseWriteLock(&vcp->lock);
2883 AFS_GUNLOCK();
2885 set_page_writeback(pp);
2887 SetPageUptodate(pp);
2889 /* We can unlock the page here, because it's protected by the
2890 * page_writeback flag. This should make us less vulnerable to
2891 * deadlocking in afs_write and afs_DoPartialWrite
2893 unlock_page(pp);
2895 /* If this is the final page, then just write the number of bytes that
2896 * are actually in it */
2897 if ((isize - page_offset(pp)) < to )
2898 to = isize - page_offset(pp);
2900 code = afs_linux_page_writeback(inode, pp, 0, to, credp);
2902 AFS_GLOCK();
2903 ObtainWriteLock(&vcp->lock, 538);
2905 /* As much as we might like to ignore a file server error here,
2906 * and just try again when we close(), unfortunately StoreAllSegments
2907 * will invalidate our chunks if the server returns a permanent error,
2908 * so we need to at least try and get that error back to the user
2910 if (code == to)
2911 code1 = afs_linux_dopartialwrite(vcp, credp);
2913 afs_linux_complete_writeback(vcp);
2914 ReleaseWriteLock(&vcp->lock);
2915 crfree(credp);
2916 AFS_GUNLOCK();
2918 done:
2919 end_page_writeback(pp);
2920 put_page(pp);
2922 if (code1)
2923 return code1;
2925 if (code == to)
2926 return 0;
2928 return code;
2931 /* afs_linux_permission
2932 * Check access rights - returns error if can't check or permission denied.
2934 static int
2935 #if defined(IOP_PERMISSION_TAKES_FLAGS)
2936 afs_linux_permission(struct inode *ip, int mode, unsigned int flags)
2937 #elif defined(IOP_PERMISSION_TAKES_NAMEIDATA)
2938 afs_linux_permission(struct inode *ip, int mode, struct nameidata *nd)
2939 #else
2940 afs_linux_permission(struct inode *ip, int mode)
2941 #endif
2943 int code;
2944 cred_t *credp;
2945 int tmp = 0;
2947 /* Check for RCU path walking */
2948 #if defined(IOP_PERMISSION_TAKES_FLAGS)
2949 if (flags & IPERM_FLAG_RCU)
2950 return -ECHILD;
2951 #elif defined(MAY_NOT_BLOCK)
2952 if (mode & MAY_NOT_BLOCK)
2953 return -ECHILD;
2954 #endif
2956 credp = crref();
2957 AFS_GLOCK();
2958 if (mode & MAY_EXEC)
2959 tmp |= VEXEC;
2960 if (mode & MAY_READ)
2961 tmp |= VREAD;
2962 if (mode & MAY_WRITE)
2963 tmp |= VWRITE;
2964 code = afs_access(VTOAFS(ip), tmp, credp);
2966 AFS_GUNLOCK();
2967 crfree(credp);
2968 return afs_convert_code(code);
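/* Legacy commit_write operation (also used by write_end): extend i_size if
 * the write went past it, mark a previously Checked page up to date, and then
 * write the dirty byte range back synchronously. */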
2971 static int
2972 afs_linux_commit_write(struct file *file, struct page *page, unsigned offset,
2973 unsigned to)
2975 int code;
2976 struct inode *inode = FILE_INODE(file);
2977 loff_t pagebase = page_offset(page);
2979 if (i_size_read(inode) < (pagebase + offset))
2980 i_size_write(inode, pagebase + offset);
2982 if (PageChecked(page)) {
2983 SetPageUptodate(page);
2984 ClearPageChecked(page);
2987 code = afs_linux_writepage_sync(inode, page, offset, to - offset);
2989 return code;
2992 static int
2993 afs_linux_prepare_write(struct file *file, struct page *page, unsigned from,
2994 unsigned to)
2997 /* http://kerneltrap.org/node/4941 details the expected behaviour of
2998 * prepare_write. Essentially, if the page exists within the file,
2999 * and is not being fully written, then we should populate it.
3002 if (!PageUptodate(page)) {
3003 loff_t pagebase = page_offset(page);
3004 loff_t isize = i_size_read(page->mapping->host);
3006 /* Is the location we are writing to beyond the end of the file? */
3007 if (pagebase >= isize ||
3008 ((from == 0) && (pagebase + to) >= isize)) {
3009 zero_user_segments(page, 0, from, to, PAGE_SIZE);
3010 SetPageChecked(page);
3011 /* Are we writing a full page */
3012 } else if (from == 0 && to == PAGE_SIZE) {
3013 SetPageChecked(page);
3014 /* Is the page readable? If the file is write-only we don't care,
3015 * because we're not actually going to read from it ... */
3016 } else if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
3017 /* We don't care if fillpage fails, because if it does the page
3018 * won't be marked as up to date
3020 afs_linux_fillpage(file, page);
3023 return 0;
3026 #if defined(STRUCT_ADDRESS_SPACE_OPERATIONS_HAS_WRITE_BEGIN)
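/* Map the newer write_begin/write_end address space interface onto the
 * prepare_write/commit_write helpers above. */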
3027 static int
3028 afs_linux_write_end(struct file *file, struct address_space *mapping,
3029 loff_t pos, unsigned len, unsigned copied,
3030 struct page *page, void *fsdata)
3032 int code;
3033 unsigned int from = pos & (PAGE_SIZE - 1);
3035 code = afs_linux_commit_write(file, page, from, from + copied);
3037 unlock_page(page);
3038 put_page(page);
3039 return code;
3042 static int
3043 afs_linux_write_begin(struct file *file, struct address_space *mapping,
3044 loff_t pos, unsigned len, unsigned flags,
3045 struct page **pagep, void **fsdata)
3047 struct page *page;
3048 pgoff_t index = pos >> PAGE_SHIFT;
3049 unsigned int from = pos & (PAGE_SIZE - 1);
3050 int code;
3052 page = grab_cache_page_write_begin(mapping, index, flags);
3053 *pagep = page;
3055 code = afs_linux_prepare_write(file, page, from, from + len);
3056 if (code) {
3057 unlock_page(page);
3058 put_page(page);
3061 return code;
3063 #endif
3065 #ifndef STRUCT_DENTRY_OPERATIONS_HAS_D_AUTOMOUNT
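/* follow_link for directory inodes, used only when the kernel has no
 * d_automount support: swap the nameidata's dentry for the canonical dentry
 * of this directory (falling back to the dentry we were given) and mark the
 * result as LAST_BIND. */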
3066 static void *
3067 afs_linux_dir_follow_link(struct dentry *dentry, struct nameidata *nd)
3069 struct dentry **dpp;
3070 struct dentry *target;
3072 if (current->total_link_count > 0) {
3073 /* avoid symlink resolution limits when resolving; we cannot contribute to
3074 * an infinite symlink loop */
3075 /* only do this for follow_link when total_link_count is positive to be
3076 * on the safe side; there is at least one code path in the Linux
3077 * kernel where it seems like it may be possible to get here without
3078 * total_link_count getting incremented. It is not clear how that
3079 * path is actually reached, but guard against it just to be safe */
3080 current->total_link_count--;
3083 target = canonical_dentry(dentry->d_inode);
3085 # ifdef STRUCT_NAMEIDATA_HAS_PATH
3086 dpp = &nd->path.dentry;
3087 # else
3088 dpp = &nd->dentry;
3089 # endif
3091 dput(*dpp);
3093 if (target) {
3094 *dpp = target;
3095 } else {
3096 *dpp = dget(dentry);
3099 nd->last_type = LAST_BIND;
3101 return NULL;
3103 #endif /* !STRUCT_DENTRY_OPERATIONS_HAS_D_AUTOMOUNT */
3106 static struct inode_operations afs_file_iops = {
3107 .permission = afs_linux_permission,
3108 .getattr = afs_linux_getattr,
3109 .setattr = afs_notify_change,
3112 static struct address_space_operations afs_file_aops = {
3113 .readpage = afs_linux_readpage,
3114 .readpages = afs_linux_readpages,
3115 .writepage = afs_linux_writepage,
3116 #if defined (STRUCT_ADDRESS_SPACE_OPERATIONS_HAS_WRITE_BEGIN)
3117 .write_begin = afs_linux_write_begin,
3118 .write_end = afs_linux_write_end,
3119 #else
3120 .commit_write = afs_linux_commit_write,
3121 .prepare_write = afs_linux_prepare_write,
3122 #endif
3126 /* Separate ops vector for directories. Linux 2.2 tests the type of an inode
3127 * by what sort of operation is allowed.
3130 static struct inode_operations afs_dir_iops = {
3131 .setattr = afs_notify_change,
3132 .create = afs_linux_create,
3133 .lookup = afs_linux_lookup,
3134 .link = afs_linux_link,
3135 .unlink = afs_linux_unlink,
3136 .symlink = afs_linux_symlink,
3137 .mkdir = afs_linux_mkdir,
3138 .rmdir = afs_linux_rmdir,
3139 .rename = afs_linux_rename,
3140 .getattr = afs_linux_getattr,
3141 .permission = afs_linux_permission,
3142 #ifndef STRUCT_DENTRY_OPERATIONS_HAS_D_AUTOMOUNT
3143 .follow_link = afs_linux_dir_follow_link,
3144 #endif
3147 /* We really need a separate symlink set of ops, since do_follow_link()
3148 * determines if it _is_ a link by checking if the follow_link op is set.
3150 #if defined(USABLE_KERNEL_PAGE_SYMLINK_CACHE)
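/* readpage operation for the symlink page cache: read the link target into
 * the page with afs_linux_ireadlink(), NUL-terminate it, and mark the page up
 * to date (or flag an error on failure). */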
3151 static int
3152 afs_symlink_filler(struct file *file, struct page *page)
3154 struct inode *ip = (struct inode *)page->mapping->host;
3155 char *p = (char *)kmap(page);
3156 int code;
3158 AFS_GLOCK();
3159 code = afs_linux_ireadlink(ip, p, PAGE_SIZE, AFS_UIOSYS);
3160 AFS_GUNLOCK();
3162 if (code < 0)
3163 goto fail;
3164 p[code] = '\0'; /* null terminate? */
3166 SetPageUptodate(page);
3167 kunmap(page);
3168 unlock_page(page);
3169 return 0;
3171 fail:
3172 SetPageError(page);
3173 kunmap(page);
3174 unlock_page(page);
3175 return code;
3178 static struct address_space_operations afs_symlink_aops = {
3179 .readpage = afs_symlink_filler
3181 #endif /* USABLE_KERNEL_PAGE_SYMLINK_CACHE */
3183 static struct inode_operations afs_symlink_iops = {
3184 #if defined(USABLE_KERNEL_PAGE_SYMLINK_CACHE)
3185 .readlink = page_readlink,
3186 # if defined(HAVE_LINUX_PAGE_GET_LINK)
3187 .get_link = page_get_link,
3188 # elif defined(HAVE_LINUX_PAGE_FOLLOW_LINK)
3189 .follow_link = page_follow_link,
3190 # else
3191 .follow_link = page_follow_link_light,
3192 .put_link = page_put_link,
3193 # endif
3194 #else /* !defined(USABLE_KERNEL_PAGE_SYMLINK_CACHE) */
3195 .readlink = afs_linux_readlink,
3196 .follow_link = afs_linux_follow_link,
3197 .put_link = afs_linux_put_link,
3198 #endif /* USABLE_KERNEL_PAGE_SYMLINK_CACHE */
3199 .setattr = afs_notify_change,
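/* Initialise a Linux inode from an AFS vcache: copy the supplied vattr (if
 * any) into the inode and select the inode, file and address space operation
 * vectors according to the file type (regular file, directory or symlink). */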
3202 void
3203 afs_fill_inode(struct inode *ip, struct vattr *vattr)
3205 if (vattr)
3206 vattr2inode(ip, vattr);
3208 #ifdef STRUCT_ADDRESS_SPACE_HAS_BACKING_DEV_INFO
3209 ip->i_mapping->backing_dev_info = afs_backing_dev_info;
3210 #endif
3211 /* Reset ops if symlink or directory. */
3212 if (S_ISREG(ip->i_mode)) {
3213 ip->i_op = &afs_file_iops;
3214 ip->i_fop = &afs_file_fops;
3215 ip->i_data.a_ops = &afs_file_aops;
3217 } else if (S_ISDIR(ip->i_mode)) {
3218 ip->i_op = &afs_dir_iops;
3219 ip->i_fop = &afs_dir_fops;
3221 } else if (S_ISLNK(ip->i_mode)) {
3222 ip->i_op = &afs_symlink_iops;
3223 #if defined(HAVE_LINUX_INODE_NOHIGHMEM)
3224 inode_nohighmem(ip);
3225 #endif
3226 #if defined(USABLE_KERNEL_PAGE_SYMLINK_CACHE)
3227 ip->i_data.a_ops = &afs_symlink_aops;
3228 ip->i_mapping = &ip->i_data;
3229 #endif