Linux: Test for __vfs_write rather than __vfs_read
[pkg-k5-afs_openafs.git] / src / afs / LINUX / osi_vnodeops.c
blob d935cf56df4892187253a6f8af296962fef9f091
1 /*
2 * Copyright 2000, International Business Machines Corporation and others.
3 * All Rights Reserved.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
8 */
11 * Linux specific vnodeops. Also includes the glue routines required to call
12 * AFS vnodeops.
14 * So far the only truly scary part is that Linux relies on the inode cache
15 * to be up to date. Don't you dare break a callback and expect an fstat
16 * to give you meaningful information. This appears to be fixed in the 2.1
17 * development kernels. As it is we can fix this now by intercepting the
18 * stat calls.
21 #include <afsconfig.h>
22 #include "afs/param.h"
25 #include "afs/sysincludes.h"
26 #include "afsincludes.h"
27 #include "afs/afs_stats.h"
28 #include <linux/mm.h>
29 #ifdef HAVE_MM_INLINE_H
30 #include <linux/mm_inline.h>
31 #endif
32 #include <linux/pagemap.h>
33 #include <linux/writeback.h>
34 #include <linux/pagevec.h>
35 #include <linux/aio.h>
36 #include "afs/lock.h"
37 #include "afs/afs_bypasscache.h"
39 #include "osi_compat.h"
40 #include "osi_pagecopy.h"
42 #ifndef HAVE_LINUX_PAGEVEC_LRU_ADD_FILE
43 #define __pagevec_lru_add_file __pagevec_lru_add
44 #endif
46 #ifndef MAX_ERRNO
47 #define MAX_ERRNO 1000L
48 #endif
50 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34)
51 /* Enable our workaround for a race with d_splice_alias. The race was fixed in
52 * 2.6.34, so don't do it after that point. */
53 # define D_SPLICE_ALIAS_RACE
54 #endif
56 int cachefs_noreadpage = 0;
58 extern struct backing_dev_info *afs_backing_dev_info;
60 extern struct vcache *afs_globalVp;
62 /* This function converts a positive error code from AFS into a negative
63 * code suitable for passing into the Linux VFS layer. It checks that the
64  * error code is within the permissible bounds for the ERR_PTR mechanism.
66 * _All_ error codes which come from the AFS layer should be passed through
67 * this function before being returned to the kernel.
70 static inline int
71 afs_convert_code(int code) {
72 if ((code >= 0) && (code <= MAX_ERRNO))
73 return -code;
74 else
75 return -EIO;
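/*
 * For example, a positive AFS error of EACCES (13) becomes -13, which the
 * kernel's ERR_PTR()/IS_ERR() machinery can represent safely; anything
 * outside the [0, MAX_ERRNO] range is collapsed to -EIO rather than being
 * misinterpreted as a valid pointer value.
 */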
78 /* Linux doesn't require a credp for many functions, and crref is an expensive
79 * operation. This helper function avoids obtaining it for VerifyVCache calls
82 static inline int
83 afs_linux_VerifyVCache(struct vcache *avc, cred_t **retcred) {
84 cred_t *credp = NULL;
85 struct vrequest *treq = NULL;
86 int code;
88 if (avc->f.states & CStatd) {
89 if (retcred)
90 *retcred = NULL;
91 return 0;
94 credp = crref();
96 code = afs_CreateReq(&treq, credp);
97 if (code == 0) {
98 code = afs_VerifyVCache2(avc, treq);
99 afs_DestroyReq(treq);
102 if (retcred != NULL)
103 *retcred = credp;
104 else
105 crfree(credp);
107 return afs_convert_code(code);
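/*
 * Usage note: on the fast path (CStatd already set) no credential reference
 * is taken and *retcred is set to NULL. When a reference is taken, callers
 * that passed a non-NULL retcred receive it and must crfree() it themselves;
 * callers that passed NULL never see the credential, as it is freed here.
 */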
110 #if defined(STRUCT_FILE_OPERATIONS_HAS_READ_ITER) || defined(HAVE_LINUX_GENERIC_FILE_AIO_READ)
111 # if defined(STRUCT_FILE_OPERATIONS_HAS_READ_ITER)
112 static ssize_t
113 afs_linux_read_iter(struct kiocb *iocb, struct iov_iter *iter)
114 # elif defined(LINUX_HAS_NONVECTOR_AIO)
115 static ssize_t
116 afs_linux_aio_read(struct kiocb *iocb, char __user *buf, size_t bufsize,
117 loff_t pos)
118 # else
119 static ssize_t
120 afs_linux_aio_read(struct kiocb *iocb, const struct iovec *buf,
121 unsigned long bufsize, loff_t pos)
122 # endif
124 struct file *fp = iocb->ki_filp;
125 ssize_t code = 0;
126 struct vcache *vcp = VTOAFS(fp->f_dentry->d_inode);
127 # if defined(STRUCT_FILE_OPERATIONS_HAS_READ_ITER)
128 loff_t pos = iocb->ki_pos;
129 unsigned long bufsize = iter->nr_segs;
130 # endif
133 AFS_GLOCK();
134 afs_Trace4(afs_iclSetp, CM_TRACE_AIOREADOP, ICL_TYPE_POINTER, vcp,
135 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(pos), ICL_TYPE_INT32,
136 (afs_int32)bufsize, ICL_TYPE_INT32, 99999);
137 code = afs_linux_VerifyVCache(vcp, NULL);
139 if (code == 0) {
140 /* Linux's FlushPages implementation doesn't ever use credp,
141 * so we optimise by not using it */
142 osi_FlushPages(vcp, NULL); /* ensure stale pages are gone */
143 AFS_GUNLOCK();
144 # if defined(STRUCT_FILE_OPERATIONS_HAS_READ_ITER)
145 code = generic_file_read_iter(iocb, iter);
146 # else
147 code = generic_file_aio_read(iocb, buf, bufsize, pos);
148 # endif
149 AFS_GLOCK();
152 afs_Trace4(afs_iclSetp, CM_TRACE_AIOREADOP, ICL_TYPE_POINTER, vcp,
153 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(pos), ICL_TYPE_INT32,
154 (afs_int32)bufsize, ICL_TYPE_INT32, code);
155 AFS_GUNLOCK();
156 return code;
158 #else
159 static ssize_t
160 afs_linux_read(struct file *fp, char *buf, size_t count, loff_t * offp)
162 ssize_t code = 0;
163 struct vcache *vcp = VTOAFS(fp->f_dentry->d_inode);
165 AFS_GLOCK();
166 afs_Trace4(afs_iclSetp, CM_TRACE_READOP, ICL_TYPE_POINTER, vcp,
167 ICL_TYPE_OFFSET, offp, ICL_TYPE_INT32, count, ICL_TYPE_INT32,
168 99999);
169 code = afs_linux_VerifyVCache(vcp, NULL);
171 if (code == 0) {
172 /* Linux's FlushPages implementation doesn't ever use credp,
173 * so we optimise by not using it */
174 osi_FlushPages(vcp, NULL); /* ensure stale pages are gone */
175 AFS_GUNLOCK();
176 code = do_sync_read(fp, buf, count, offp);
177 AFS_GLOCK();
180 afs_Trace4(afs_iclSetp, CM_TRACE_READOP, ICL_TYPE_POINTER, vcp,
181 ICL_TYPE_OFFSET, offp, ICL_TYPE_INT32, count, ICL_TYPE_INT32,
182 code);
183 AFS_GUNLOCK();
184 return code;
186 #endif
189 /* Now we have integrated VM for writes as well as reads. The generic write operations
190  * also take care of re-positioning the pointer if the file is open in append
191  * mode. Call fake open/close to ensure we do writes of core dumps.
193 #if defined(STRUCT_FILE_OPERATIONS_HAS_READ_ITER) || defined(HAVE_LINUX_GENERIC_FILE_AIO_READ)
194 # if defined(STRUCT_FILE_OPERATIONS_HAS_READ_ITER)
195 static ssize_t
196 afs_linux_write_iter(struct kiocb *iocb, struct iov_iter *iter)
197 # elif defined(LINUX_HAS_NONVECTOR_AIO)
198 static ssize_t
199 afs_linux_aio_write(struct kiocb *iocb, const char __user *buf, size_t bufsize,
200 loff_t pos)
201 # else
202 static ssize_t
203 afs_linux_aio_write(struct kiocb *iocb, const struct iovec *buf,
204 unsigned long bufsize, loff_t pos)
205 # endif
207 ssize_t code = 0;
208 struct vcache *vcp = VTOAFS(iocb->ki_filp->f_dentry->d_inode);
209 cred_t *credp;
210 # if defined(STRUCT_FILE_OPERATIONS_HAS_READ_ITER)
211 loff_t pos = iocb->ki_pos;
212 unsigned long bufsize = iter->nr_segs;
213 # endif
215 AFS_GLOCK();
217 afs_Trace4(afs_iclSetp, CM_TRACE_AIOWRITEOP, ICL_TYPE_POINTER, vcp,
218 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(pos), ICL_TYPE_INT32,
219 (afs_int32)bufsize, ICL_TYPE_INT32,
220 (iocb->ki_filp->f_flags & O_APPEND) ? 99998 : 99999);
222 code = afs_linux_VerifyVCache(vcp, &credp);
224 ObtainWriteLock(&vcp->lock, 529);
225 afs_FakeOpen(vcp);
226 ReleaseWriteLock(&vcp->lock);
227 if (code == 0) {
228 AFS_GUNLOCK();
229 # if defined(STRUCT_FILE_OPERATIONS_HAS_READ_ITER)
230 code = generic_file_write_iter(iocb, iter);
231 # else
232 code = generic_file_aio_write(iocb, buf, bufsize, pos);
233 # endif
234 AFS_GLOCK();
237 ObtainWriteLock(&vcp->lock, 530);
239 if (vcp->execsOrWriters == 1 && !credp)
240 credp = crref();
242 afs_FakeClose(vcp, credp);
243 ReleaseWriteLock(&vcp->lock);
245 afs_Trace4(afs_iclSetp, CM_TRACE_AIOWRITEOP, ICL_TYPE_POINTER, vcp,
246 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(pos), ICL_TYPE_INT32,
247 (afs_int32)bufsize, ICL_TYPE_INT32, code);
249 if (credp)
250 crfree(credp);
251 AFS_GUNLOCK();
252 return code;
254 #else
255 static ssize_t
256 afs_linux_write(struct file *fp, const char *buf, size_t count, loff_t * offp)
258 ssize_t code = 0;
259 struct vcache *vcp = VTOAFS(fp->f_dentry->d_inode);
260 cred_t *credp;
262 AFS_GLOCK();
264 afs_Trace4(afs_iclSetp, CM_TRACE_WRITEOP, ICL_TYPE_POINTER, vcp,
265 ICL_TYPE_OFFSET, offp, ICL_TYPE_INT32, count, ICL_TYPE_INT32,
266 (fp->f_flags & O_APPEND) ? 99998 : 99999);
268 code = afs_linux_VerifyVCache(vcp, &credp);
270 ObtainWriteLock(&vcp->lock, 529);
271 afs_FakeOpen(vcp);
272 ReleaseWriteLock(&vcp->lock);
273 if (code == 0) {
274 AFS_GUNLOCK();
275 code = do_sync_write(fp, buf, count, offp);
276 AFS_GLOCK();
279 ObtainWriteLock(&vcp->lock, 530);
281 if (vcp->execsOrWriters == 1 && !credp)
282 credp = crref();
284 afs_FakeClose(vcp, credp);
285 ReleaseWriteLock(&vcp->lock);
287 afs_Trace4(afs_iclSetp, CM_TRACE_WRITEOP, ICL_TYPE_POINTER, vcp,
288 ICL_TYPE_OFFSET, offp, ICL_TYPE_INT32, count, ICL_TYPE_INT32,
289 code);
291 if (credp)
292 crfree(credp);
293 AFS_GUNLOCK();
294 return code;
296 #endif
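/* Both write paths above bracket the generic write with afs_FakeOpen() and
 * afs_FakeClose(); as the comment before them notes, this makes writes that
 * arrive without a real open (core dumps, for instance) look like an
 * open/write/close sequence, so the data is still stored back properly. */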
298 extern int BlobScan(struct dcache * afile, afs_int32 ablob, afs_int32 *ablobOut);
300 /* This is a complete rewrite of afs_readdir, since we can make use of
301 * filldir instead of afs_readdir_move. Note that changes to vcache/dcache
302 * handling and use of bulkstats will need to be reflected here as well.
304 static int
305 #if defined(STRUCT_FILE_OPERATIONS_HAS_ITERATE)
306 afs_linux_readdir(struct file *fp, struct dir_context *ctx)
307 #else
308 afs_linux_readdir(struct file *fp, void *dirbuf, filldir_t filldir)
309 #endif
311 struct vcache *avc = VTOAFS(FILE_INODE(fp));
312 struct vrequest *treq = NULL;
313 struct dcache *tdc;
314 int code;
315 int offset;
316 afs_int32 dirpos;
317 struct DirEntry *de;
318 struct DirBuffer entry;
319 ino_t ino;
320 int len;
321 afs_size_t origOffset, tlen;
322 cred_t *credp = crref();
323 struct afs_fakestat_state fakestat;
325 AFS_GLOCK();
326 AFS_STATCNT(afs_readdir);
328 code = afs_convert_code(afs_CreateReq(&treq, credp));
329 crfree(credp);
330 if (code)
331 goto out1;
333 afs_InitFakeStat(&fakestat);
334 code = afs_convert_code(afs_EvalFakeStat(&avc, &fakestat, treq));
335 if (code)
336 goto out;
338 /* update the cache entry */
339 tagain:
340 code = afs_convert_code(afs_VerifyVCache2(avc, treq));
341 if (code)
342 goto out;
344 /* get a reference to the entire directory */
345 tdc = afs_GetDCache(avc, (afs_size_t) 0, treq, &origOffset, &tlen, 1);
346 len = tlen;
347 if (!tdc) {
348 code = -EIO;
349 goto out;
351 ObtainWriteLock(&avc->lock, 811);
352 ObtainReadLock(&tdc->lock);
354 * Make sure that the data in the cache is current. There are two
355 * cases we need to worry about:
356 * 1. The cache data is being fetched by another process.
357 * 2. The cache data is no longer valid
359 while ((avc->f.states & CStatd)
360 && (tdc->dflags & DFFetching)
361 && hsame(avc->f.m.DataVersion, tdc->f.versionNo)) {
362 ReleaseReadLock(&tdc->lock);
363 ReleaseWriteLock(&avc->lock);
364 afs_osi_Sleep(&tdc->validPos);
365 ObtainWriteLock(&avc->lock, 812);
366 ObtainReadLock(&tdc->lock);
368 if (!(avc->f.states & CStatd)
369 || !hsame(avc->f.m.DataVersion, tdc->f.versionNo)) {
370 ReleaseReadLock(&tdc->lock);
371 ReleaseWriteLock(&avc->lock);
372 afs_PutDCache(tdc);
373 goto tagain;
376 /* Set the readdir-in-progress flag, and downgrade the lock
377 * to shared so others will be able to acquire a read lock.
379 avc->f.states |= CReadDir;
380 avc->dcreaddir = tdc;
381 avc->readdir_pid = MyPidxx2Pid(MyPidxx);
382 ConvertWToSLock(&avc->lock);
384 /* Fill in until we get an error or we're done. This implementation
385 * takes an offset in units of blobs, rather than bytes.
387 code = 0;
388 #if defined(STRUCT_FILE_OPERATIONS_HAS_ITERATE)
389 offset = ctx->pos;
390 #else
391 offset = (int) fp->f_pos;
392 #endif
393 while (1) {
394 code = BlobScan(tdc, offset, &dirpos);
395 if (code || !dirpos)
396 break;
398 code = afs_dir_GetVerifiedBlob(tdc, dirpos, &entry);
399 if (code) {
400 if (!(avc->f.states & CCorrupt)) {
401 struct cell *tc = afs_GetCellStale(avc->f.fid.Cell, READ_LOCK);
402 afs_warn("afs: Corrupt directory (%d.%d.%d.%d [%s] @%lx, pos %d)\n",
403 avc->f.fid.Cell, avc->f.fid.Fid.Volume,
404 avc->f.fid.Fid.Vnode, avc->f.fid.Fid.Unique,
405 tc ? tc->cellName : "",
406 (unsigned long)&tdc->f.inode, dirpos);
407 if (tc)
408 afs_PutCell(tc, READ_LOCK);
409 UpgradeSToWLock(&avc->lock, 814);
410 avc->f.states |= CCorrupt;
412 code = -EIO;
413 goto unlock_out;
416 de = (struct DirEntry *)entry.data;
417 ino = afs_calc_inum (avc->f.fid.Cell, avc->f.fid.Fid.Volume,
418 ntohl(de->fid.vnode));
419 len = strlen(de->name);
421 /* filldir returns -EINVAL when the buffer is full. */
423 unsigned int type = DT_UNKNOWN;
424 struct VenusFid afid;
425 struct vcache *tvc;
426 int vtype;
427 afid.Cell = avc->f.fid.Cell;
428 afid.Fid.Volume = avc->f.fid.Fid.Volume;
429 afid.Fid.Vnode = ntohl(de->fid.vnode);
430 afid.Fid.Unique = ntohl(de->fid.vunique);
431 if ((avc->f.states & CForeign) == 0 && (ntohl(de->fid.vnode) & 1)) {
432 type = DT_DIR;
433 } else if ((tvc = afs_FindVCache(&afid, 0, 0))) {
434 if (tvc->mvstat != AFS_MVSTAT_FILE) {
435 type = DT_DIR;
436 } else if (((tvc->f.states) & (CStatd | CTruth))) {
437 /* CTruth will be set if the object has
438 *ever* been statd */
439 vtype = vType(tvc);
440 if (vtype == VDIR)
441 type = DT_DIR;
442 else if (vtype == VREG)
443 type = DT_REG;
444 /* Don't do this until we're sure it can't be a mtpt */
445 /* else if (vtype == VLNK)
446 * type=DT_LNK; */
447 /* what other types does AFS support? */
449 /* clean up from afs_FindVCache */
450 afs_PutVCache(tvc);
453 * If this is NFS readdirplus, then the filler is going to
454 * call getattr on this inode, which will deadlock if we're
455 * holding the GLOCK.
457 AFS_GUNLOCK();
458 #if defined(STRUCT_FILE_OPERATIONS_HAS_ITERATE)
459 /* dir_emit returns a bool - true when it succeeds.
460 	 * Invert the result to fit with how we check "code" */
461 code = !dir_emit(ctx, de->name, len, ino, type);
462 #else
463 code = (*filldir) (dirbuf, de->name, len, offset, ino, type);
464 #endif
465 AFS_GLOCK();
467 DRelease(&entry, 0);
468 if (code)
469 break;
470 offset = dirpos + 1 + ((len + 16) >> 5);
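	/* Advance to the blob just past this entry: directory entries live in
	 * 32-byte blobs, and an entry whose name is 'len' bytes long spans
	 * 1 + ((len + 16) >> 5) blobs starting at dirpos. */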
472     /* If filldir didn't fill in the last one, this is still pointing to that
473 * last attempt.
475 code = 0;
477 unlock_out:
478 #if defined(STRUCT_FILE_OPERATIONS_HAS_ITERATE)
479 ctx->pos = (loff_t) offset;
480 #else
481 fp->f_pos = (loff_t) offset;
482 #endif
483 ReleaseReadLock(&tdc->lock);
484 afs_PutDCache(tdc);
485 UpgradeSToWLock(&avc->lock, 813);
486 avc->f.states &= ~CReadDir;
487 avc->dcreaddir = 0;
488 avc->readdir_pid = 0;
489 ReleaseSharedLock(&avc->lock);
491 out:
492 afs_PutFakeStat(&fakestat);
493 afs_DestroyReq(treq);
494 out1:
495 AFS_GUNLOCK();
496 return code;
500 /* in afs_pioctl.c */
501 extern int afs_xioctl(struct inode *ip, struct file *fp, unsigned int com,
502 unsigned long arg);
504 #if defined(HAVE_UNLOCKED_IOCTL) || defined(HAVE_COMPAT_IOCTL)
505 static long afs_unlocked_xioctl(struct file *fp, unsigned int com,
506 unsigned long arg) {
507 return afs_xioctl(FILE_INODE(fp), fp, com, arg);
510 #endif
513 static int
514 afs_linux_mmap(struct file *fp, struct vm_area_struct *vmap)
516 struct vcache *vcp = VTOAFS(FILE_INODE(fp));
517 int code;
519 AFS_GLOCK();
520 afs_Trace3(afs_iclSetp, CM_TRACE_GMAP, ICL_TYPE_POINTER, vcp,
521 ICL_TYPE_POINTER, vmap->vm_start, ICL_TYPE_INT32,
522 vmap->vm_end - vmap->vm_start);
524 /* get a validated vcache entry */
525 code = afs_linux_VerifyVCache(vcp, NULL);
527 if (code == 0) {
528 /* Linux's Flushpage implementation doesn't use credp, so optimise
529 * our code to not need to crref() it */
530 osi_FlushPages(vcp, NULL); /* ensure stale pages are gone */
531 AFS_GUNLOCK();
532 code = generic_file_mmap(fp, vmap);
533 AFS_GLOCK();
534 if (!code)
535 vcp->f.states |= CMAPPED;
537 AFS_GUNLOCK();
539 return code;
542 static int
543 afs_linux_open(struct inode *ip, struct file *fp)
545 struct vcache *vcp = VTOAFS(ip);
546 cred_t *credp = crref();
547 int code;
549 AFS_GLOCK();
550 code = afs_open(&vcp, fp->f_flags, credp);
551 AFS_GUNLOCK();
553 crfree(credp);
554 return afs_convert_code(code);
557 static int
558 afs_linux_release(struct inode *ip, struct file *fp)
560 struct vcache *vcp = VTOAFS(ip);
561 cred_t *credp = crref();
562 int code = 0;
564 AFS_GLOCK();
565 code = afs_close(vcp, fp->f_flags, credp);
566 ObtainWriteLock(&vcp->lock, 807);
567 if (vcp->cred) {
568 crfree(vcp->cred);
569 vcp->cred = NULL;
571 ReleaseWriteLock(&vcp->lock);
572 AFS_GUNLOCK();
574 crfree(credp);
575 return afs_convert_code(code);
578 static int
579 #if defined(FOP_FSYNC_TAKES_DENTRY)
580 afs_linux_fsync(struct file *fp, struct dentry *dp, int datasync)
581 #elif defined(FOP_FSYNC_TAKES_RANGE)
582 afs_linux_fsync(struct file *fp, loff_t start, loff_t end, int datasync)
583 #else
584 afs_linux_fsync(struct file *fp, int datasync)
585 #endif
587 int code;
588 struct inode *ip = FILE_INODE(fp);
589 cred_t *credp = crref();
591 #if defined(FOP_FSYNC_TAKES_RANGE)
592 afs_linux_lock_inode(ip);
593 #endif
594 AFS_GLOCK();
595 code = afs_fsync(VTOAFS(ip), credp);
596 AFS_GUNLOCK();
597 #if defined(FOP_FSYNC_TAKES_RANGE)
598 afs_linux_unlock_inode(ip);
599 #endif
600 crfree(credp);
601 return afs_convert_code(code);
606 static int
607 afs_linux_lock(struct file *fp, int cmd, struct file_lock *flp)
609 int code = 0;
610 struct vcache *vcp = VTOAFS(FILE_INODE(fp));
611 cred_t *credp = crref();
612 struct AFS_FLOCK flock;
614 /* Convert to a lock format afs_lockctl understands. */
615 memset(&flock, 0, sizeof(flock));
616 flock.l_type = flp->fl_type;
617 flock.l_pid = flp->fl_pid;
618 flock.l_whence = 0;
619 flock.l_start = flp->fl_start;
620 if (flp->fl_end == OFFSET_MAX)
621 flock.l_len = 0; /* Lock to end of file */
622 else
623 flock.l_len = flp->fl_end - flp->fl_start + 1;
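	/* Note the two conventions in play: Linux's struct file_lock carries an
	 * inclusive [fl_start, fl_end] range, with fl_end == OFFSET_MAX meaning
	 * "to end of file", while AFS_FLOCK uses a start plus length, with
	 * l_len == 0 meaning "to end of file". The reverse conversion happens
	 * near the end of this function. */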
625 /* Safe because there are no large files, yet */
626 #if defined(F_GETLK64) && (F_GETLK != F_GETLK64)
627 if (cmd == F_GETLK64)
628 cmd = F_GETLK;
629 else if (cmd == F_SETLK64)
630 cmd = F_SETLK;
631 else if (cmd == F_SETLKW64)
632 cmd = F_SETLKW;
633 #endif /* F_GETLK64 && F_GETLK != F_GETLK64 */
635 AFS_GLOCK();
636 code = afs_convert_code(afs_lockctl(vcp, &flock, cmd, credp));
637 AFS_GUNLOCK();
639 if ((code == 0 || flp->fl_type == F_UNLCK) &&
640 (cmd == F_SETLK || cmd == F_SETLKW)) {
641 code = afs_posix_lock_file(fp, flp);
642 if (code && flp->fl_type != F_UNLCK) {
643 struct AFS_FLOCK flock2;
644 flock2 = flock;
645 flock2.l_type = F_UNLCK;
646 AFS_GLOCK();
647 afs_lockctl(vcp, &flock2, F_SETLK, credp);
648 AFS_GUNLOCK();
651 /* If lockctl says there are no conflicting locks, then also check with the
652 * kernel, as lockctl knows nothing about byte range locks
654 if (code == 0 && cmd == F_GETLK && flock.l_type == F_UNLCK) {
655 afs_posix_test_lock(fp, flp);
656 /* If we found a lock in the kernel's structure, return it */
657 if (flp->fl_type != F_UNLCK) {
658 crfree(credp);
659 return 0;
663 /* Convert flock back to Linux's file_lock */
664 flp->fl_type = flock.l_type;
665 flp->fl_pid = flock.l_pid;
666 flp->fl_start = flock.l_start;
667 if (flock.l_len == 0)
668 flp->fl_end = OFFSET_MAX; /* Lock to end of file */
669 else
670 flp->fl_end = flock.l_start + flock.l_len - 1;
672 crfree(credp);
673 return code;
676 #ifdef STRUCT_FILE_OPERATIONS_HAS_FLOCK
677 static int
678 afs_linux_flock(struct file *fp, int cmd, struct file_lock *flp) {
679 int code = 0;
680 struct vcache *vcp = VTOAFS(FILE_INODE(fp));
681 cred_t *credp = crref();
682 struct AFS_FLOCK flock;
683 /* Convert to a lock format afs_lockctl understands. */
684 memset(&flock, 0, sizeof(flock));
685 flock.l_type = flp->fl_type;
686 flock.l_pid = flp->fl_pid;
687 flock.l_whence = 0;
688 flock.l_start = 0;
689 flock.l_len = 0;
691 /* Safe because there are no large files, yet */
692 #if defined(F_GETLK64) && (F_GETLK != F_GETLK64)
693 if (cmd == F_GETLK64)
694 cmd = F_GETLK;
695 else if (cmd == F_SETLK64)
696 cmd = F_SETLK;
697 else if (cmd == F_SETLKW64)
698 cmd = F_SETLKW;
699 #endif /* F_GETLK64 && F_GETLK != F_GETLK64 */
701 AFS_GLOCK();
702 code = afs_convert_code(afs_lockctl(vcp, &flock, cmd, credp));
703 AFS_GUNLOCK();
705 if ((code == 0 || flp->fl_type == F_UNLCK) &&
706 (cmd == F_SETLK || cmd == F_SETLKW)) {
707 flp->fl_flags &=~ FL_SLEEP;
708 code = flock_lock_file_wait(fp, flp);
709 if (code && flp->fl_type != F_UNLCK) {
710 struct AFS_FLOCK flock2;
711 flock2 = flock;
712 flock2.l_type = F_UNLCK;
713 AFS_GLOCK();
714 afs_lockctl(vcp, &flock2, F_SETLK, credp);
715 AFS_GUNLOCK();
718 /* Convert flock back to Linux's file_lock */
719 flp->fl_type = flock.l_type;
720 flp->fl_pid = flock.l_pid;
722 crfree(credp);
723 return code;
725 #endif
727 /* afs_linux_flush
728 * essentially the same as afs_fsync() but we need to get the return
729 * code for the sys_close() here, not afs_linux_release(), so call
730 * afs_StoreAllSegments() with AFS_LASTSTORE
732 static int
733 #if defined(FOP_FLUSH_TAKES_FL_OWNER_T)
734 afs_linux_flush(struct file *fp, fl_owner_t id)
735 #else
736 afs_linux_flush(struct file *fp)
737 #endif
739 struct vrequest *treq = NULL;
740 struct vcache *vcp;
741 cred_t *credp;
742 int code;
743 int bypasscache = 0;
745 AFS_GLOCK();
747     if ((fp->f_flags & O_ACCMODE) == O_RDONLY) { /* readers don't flush */
748 AFS_GUNLOCK();
749 return 0;
752 AFS_DISCON_LOCK();
754 credp = crref();
755 vcp = VTOAFS(FILE_INODE(fp));
757 code = afs_CreateReq(&treq, credp);
758 if (code)
759 goto out;
760 /* If caching is bypassed for this file, or globally, just return 0 */
761 if (cache_bypass_strategy == ALWAYS_BYPASS_CACHE)
762 bypasscache = 1;
763 else {
764 ObtainReadLock(&vcp->lock);
765 if (vcp->cachingStates & FCSBypass)
766 bypasscache = 1;
767 ReleaseReadLock(&vcp->lock);
769 if (bypasscache) {
770 /* future proof: don't rely on 0 return from afs_InitReq */
771 code = 0;
772 goto out;
775 ObtainSharedLock(&vcp->lock, 535);
776 if ((vcp->execsOrWriters > 0) && (file_count(fp) == 1)) {
777 UpgradeSToWLock(&vcp->lock, 536);
778 if (!AFS_IS_DISCONNECTED) {
779 code = afs_StoreAllSegments(vcp,
780 treq,
781 AFS_SYNC | AFS_LASTSTORE);
782 } else {
783 afs_DisconAddDirty(vcp, VDisconWriteOsiFlush, 1);
785 ConvertWToSLock(&vcp->lock);
787 code = afs_CheckCode(code, treq, 54);
788 ReleaseSharedLock(&vcp->lock);
790 out:
791 afs_DestroyReq(treq);
792 AFS_DISCON_UNLOCK();
793 AFS_GUNLOCK();
795 crfree(credp);
796 return afs_convert_code(code);
799 struct file_operations afs_dir_fops = {
800 .read = generic_read_dir,
801 #if defined(STRUCT_FILE_OPERATIONS_HAS_ITERATE)
802 .iterate = afs_linux_readdir,
803 #else
804 .readdir = afs_linux_readdir,
805 #endif
806 #ifdef HAVE_UNLOCKED_IOCTL
807 .unlocked_ioctl = afs_unlocked_xioctl,
808 #else
809 .ioctl = afs_xioctl,
810 #endif
811 #ifdef HAVE_COMPAT_IOCTL
812 .compat_ioctl = afs_unlocked_xioctl,
813 #endif
814 .open = afs_linux_open,
815 .release = afs_linux_release,
816 .llseek = default_llseek,
817 #ifdef HAVE_LINUX_NOOP_FSYNC
818 .fsync = noop_fsync,
819 #else
820 .fsync = simple_sync_file,
821 #endif
824 struct file_operations afs_file_fops = {
825 #ifdef STRUCT_FILE_OPERATIONS_HAS_READ_ITER
826 .read_iter = afs_linux_read_iter,
827 .write_iter = afs_linux_write_iter,
828 # if !defined(HAVE_LINUX___VFS_WRITE)
829 .read = new_sync_read,
830 .write = new_sync_write,
831 # endif
832 #elif defined(HAVE_LINUX_GENERIC_FILE_AIO_READ)
833 .aio_read = afs_linux_aio_read,
834 .aio_write = afs_linux_aio_write,
835 .read = do_sync_read,
836 .write = do_sync_write,
837 #else
838 .read = afs_linux_read,
839 .write = afs_linux_write,
840 #endif
841 #ifdef HAVE_UNLOCKED_IOCTL
842 .unlocked_ioctl = afs_unlocked_xioctl,
843 #else
844 .ioctl = afs_xioctl,
845 #endif
846 #ifdef HAVE_COMPAT_IOCTL
847 .compat_ioctl = afs_unlocked_xioctl,
848 #endif
849 .mmap = afs_linux_mmap,
850 .open = afs_linux_open,
851 .flush = afs_linux_flush,
852 #if defined(STRUCT_FILE_OPERATIONS_HAS_SENDFILE)
853 .sendfile = generic_file_sendfile,
854 #endif
855 #if defined(STRUCT_FILE_OPERATIONS_HAS_SPLICE) && !defined(HAVE_LINUX_DEFAULT_FILE_SPLICE_READ)
856 # if defined(HAVE_LINUX_ITER_FILE_SPLICE_WRITE)
857 .splice_write = iter_file_splice_write,
858 # else
859 .splice_write = generic_file_splice_write,
860 # endif
861 .splice_read = generic_file_splice_read,
862 #endif
863 .release = afs_linux_release,
864 .fsync = afs_linux_fsync,
865 .lock = afs_linux_lock,
866 #ifdef STRUCT_FILE_OPERATIONS_HAS_FLOCK
867 .flock = afs_linux_flock,
868 #endif
869 .llseek = default_llseek,
872 static struct dentry *
873 canonical_dentry(struct inode *ip)
875 struct vcache *vcp = VTOAFS(ip);
876 struct dentry *first = NULL, *ret = NULL, *cur;
877 #if defined(D_ALIAS_IS_HLIST) && !defined(HLIST_ITERATOR_NO_NODE)
878 struct hlist_node *p;
879 #endif
881 /* general strategy:
882 * if vcp->target_link is set, and can be found in ip->i_dentry, use that.
883 * otherwise, use the first dentry in ip->i_dentry.
884 * if ip->i_dentry is empty, use the 'dentry' argument we were given.
886 /* note that vcp->target_link specifies which dentry to use, but we have
887      * no reference held on that dentry. So, we cannot use or dereference
888 * vcp->target_link itself, since it may have been freed. instead, we only
889 * use it to compare to pointers in the ip->i_dentry list. */
891 d_prune_aliases(ip);
893 # ifdef HAVE_DCACHE_LOCK
894 spin_lock(&dcache_lock);
895 # else
896 spin_lock(&ip->i_lock);
897 # endif
899 #if defined(D_ALIAS_IS_HLIST)
900 # if defined(HLIST_ITERATOR_NO_NODE)
901 hlist_for_each_entry(cur, &ip->i_dentry, d_alias) {
902 # else
903 hlist_for_each_entry(cur, p, &ip->i_dentry, d_alias) {
904 # endif
905 #else
906 list_for_each_entry_reverse(cur, &ip->i_dentry, d_alias) {
907 #endif
909 if (!vcp->target_link || cur == vcp->target_link) {
910 ret = cur;
911 break;
914 if (!first) {
915 first = cur;
918 if (!ret && first) {
919 ret = first;
922 vcp->target_link = ret;
924 # ifdef HAVE_DCACHE_LOCK
925 if (ret) {
926 dget_locked(ret);
928 spin_unlock(&dcache_lock);
929 # else
930 if (ret) {
931 dget(ret);
933 spin_unlock(&ip->i_lock);
934 # endif
936 return ret;
939 /**********************************************************************
940 * AFS Linux dentry operations
941 **********************************************************************/
943 /* afs_linux_revalidate
944 * Ensure vcache is stat'd before use. Return 0 if entry is valid.
946 static int
947 afs_linux_revalidate(struct dentry *dp)
949 struct vattr *vattr = NULL;
950 struct vcache *vcp = VTOAFS(dp->d_inode);
951 cred_t *credp;
952 int code;
954 if (afs_shuttingdown != AFS_RUNNING)
955 return EIO;
957 AFS_GLOCK();
959 code = afs_CreateAttr(&vattr);
960 if (code) {
961 goto out;
964 /* This avoids the crref when we don't have to do it. Watch for
965 * changes in afs_getattr that don't get replicated here!
967 if (vcp->f.states & CStatd &&
968 (!afs_fakestat_enable || vcp->mvstat != AFS_MVSTAT_MTPT) &&
969 !afs_nfsexporter &&
970 (vType(vcp) == VDIR || vType(vcp) == VLNK)) {
971 code = afs_CopyOutAttrs(vcp, vattr);
972 } else {
973 credp = crref();
974 code = afs_getattr(vcp, vattr, credp);
975 crfree(credp);
978 if (!code)
979 afs_fill_inode(AFSTOV(vcp), vattr);
981 afs_DestroyAttr(vattr);
983 out:
984 AFS_GUNLOCK();
986 return afs_convert_code(code);
989 /* iattr2vattr
990 * Set iattr data into vattr. Assume vattr cleared before call.
992 static void
993 iattr2vattr(struct vattr *vattrp, struct iattr *iattrp)
995 vattrp->va_mask = iattrp->ia_valid;
996 if (iattrp->ia_valid & ATTR_MODE)
997 vattrp->va_mode = iattrp->ia_mode;
998 if (iattrp->ia_valid & ATTR_UID)
999 vattrp->va_uid = afs_from_kuid(iattrp->ia_uid);
1000 if (iattrp->ia_valid & ATTR_GID)
1001 vattrp->va_gid = afs_from_kgid(iattrp->ia_gid);
1002 if (iattrp->ia_valid & ATTR_SIZE)
1003 vattrp->va_size = iattrp->ia_size;
1004 if (iattrp->ia_valid & ATTR_ATIME) {
1005 vattrp->va_atime.tv_sec = iattrp->ia_atime.tv_sec;
1006 vattrp->va_atime.tv_usec = 0;
1008 if (iattrp->ia_valid & ATTR_MTIME) {
1009 vattrp->va_mtime.tv_sec = iattrp->ia_mtime.tv_sec;
1010 vattrp->va_mtime.tv_usec = 0;
1012 if (iattrp->ia_valid & ATTR_CTIME) {
1013 vattrp->va_ctime.tv_sec = iattrp->ia_ctime.tv_sec;
1014 vattrp->va_ctime.tv_usec = 0;
1018 /* vattr2inode
1019 * Rewrite the inode cache from the attr. Assumes all vattr fields are valid.
1021 void
1022 vattr2inode(struct inode *ip, struct vattr *vp)
1024 ip->i_ino = vp->va_nodeid;
1025 #ifdef HAVE_LINUX_SET_NLINK
1026 set_nlink(ip, vp->va_nlink);
1027 #else
1028 ip->i_nlink = vp->va_nlink;
1029 #endif
1030 ip->i_blocks = vp->va_blocks;
1031 #ifdef STRUCT_INODE_HAS_I_BLKBITS
1032 ip->i_blkbits = AFS_BLKBITS;
1033 #endif
1034 #ifdef STRUCT_INODE_HAS_I_BLKSIZE
1035 ip->i_blksize = vp->va_blocksize;
1036 #endif
1037 ip->i_rdev = vp->va_rdev;
1038 ip->i_mode = vp->va_mode;
1039 ip->i_uid = afs_make_kuid(vp->va_uid);
1040 ip->i_gid = afs_make_kgid(vp->va_gid);
1041 i_size_write(ip, vp->va_size);
1042 ip->i_atime.tv_sec = vp->va_atime.tv_sec;
1043 ip->i_atime.tv_nsec = 0;
1044 ip->i_mtime.tv_sec = vp->va_mtime.tv_sec;
1045 /* Set the mtime nanoseconds to the sysname generation number.
1046 * This convinces NFS clients that all directories have changed
1047 * any time the sysname list changes.
1049 ip->i_mtime.tv_nsec = afs_sysnamegen;
1050 ip->i_ctime.tv_sec = vp->va_ctime.tv_sec;
1051 ip->i_ctime.tv_nsec = 0;
1054 /* afs_notify_change
1055 * Linux version of setattr call. What to change is in the iattr struct.
1056  * We need to set bits in both the Linux inode and the vcache.
1058 static int
1059 afs_notify_change(struct dentry *dp, struct iattr *iattrp)
1061 struct vattr *vattr = NULL;
1062 cred_t *credp = crref();
1063 struct inode *ip = dp->d_inode;
1064 int code;
1066 AFS_GLOCK();
1067 code = afs_CreateAttr(&vattr);
1068 if (code) {
1069 goto out;
1072 iattr2vattr(vattr, iattrp); /* Convert for AFS vnodeops call. */
1074 code = afs_setattr(VTOAFS(ip), vattr, credp);
1075 if (!code) {
1076 afs_getattr(VTOAFS(ip), vattr, credp);
1077 vattr2inode(ip, vattr);
1079 afs_DestroyAttr(vattr);
1081 out:
1082 AFS_GUNLOCK();
1083 crfree(credp);
1084 return afs_convert_code(code);
1087 #if defined(IOP_GETATTR_TAKES_PATH_STRUCT)
1088 static int
1089 afs_linux_getattr(const struct path *path, struct kstat *stat, u32 request_mask, unsigned int sync_mode)
1091 int err = afs_linux_revalidate(path->dentry);
1092 if (!err) {
1093 generic_fillattr(path->dentry->d_inode, stat);
1095 return err;
1097 #else
1098 static int
1099 afs_linux_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
1101 int err = afs_linux_revalidate(dentry);
1102 if (!err) {
1103 generic_fillattr(dentry->d_inode, stat);
1105 return err;
1107 #endif
1109 static afs_uint32
1110 parent_vcache_dv(struct inode *inode, cred_t *credp)
1112 int free_cred = 0;
1113 struct vcache *pvcp;
1116 * If parent is a mount point and we are using fakestat, we may need
1117 * to look at the fake vcache entry instead of what the vfs is giving
1118 * us. The fake entry is the one with the useful DataVersion.
1120 pvcp = VTOAFS(inode);
1121 if (pvcp->mvstat == AFS_MVSTAT_MTPT && afs_fakestat_enable) {
1122 struct vrequest treq;
1123 struct afs_fakestat_state fakestate;
1125 if (!credp) {
1126 credp = crref();
1127 free_cred = 1;
1129 afs_InitReq(&treq, credp);
1130 afs_InitFakeStat(&fakestate);
1131 afs_TryEvalFakeStat(&pvcp, &fakestate, &treq);
1132 if (free_cred)
1133 crfree(credp);
1134 afs_PutFakeStat(&fakestate);
1136 return hgetlo(pvcp->f.m.DataVersion);
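/*
 * The value returned here is what afs_linux_create, afs_linux_lookup and
 * afs_linux_mkdir store in dp->d_time; afs_linux_dentry_revalidate later
 * compares that stored value against the parent's current DataVersion to
 * decide whether a cached positive or negative dentry can still be trusted.
 */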
1139 #ifdef D_SPLICE_ALIAS_RACE
1140 /* Leave some trace that this code is enabled; otherwise it's pretty hard to
1141 * tell. */
1142 static __attribute__((used)) const char dentry_race_marker[] = "d_splice_alias race workaround enabled";
1144 static int
1145 check_dentry_race(struct dentry *dp)
1147 int raced = 0;
1148 if (!dp->d_inode) {
1149 struct dentry *parent = dget_parent(dp);
1151 /* In Linux, before commit 4919c5e45a91b5db5a41695fe0357fbdff0d5767,
1152 * d_splice_alias can momentarily hash a dentry before it's fully
1153 * populated. This only happens for a moment, since it's unhashed again
1154 * right after (in d_move), but this can make the dentry be found by
1155 * __d_lookup, and then given to us.
1157 * So check if the dentry is unhashed; if it is, then the dentry is not
1158 * valid. We lock the parent inode to ensure that d_splice_alias is no
1159 * longer running (the inode mutex will be held during
1160 * afs_linux_lookup). Locking d_lock is required to check the dentry's
1161 * flags, so lock that, too.
1163 afs_linux_lock_inode(parent->d_inode);
1164 spin_lock(&dp->d_lock);
1165 if (d_unhashed(dp)) {
1166 raced = 1;
1168 spin_unlock(&dp->d_lock);
1169 afs_linux_unlock_inode(parent->d_inode);
1171 dput(parent);
1173 return raced;
1175 #endif /* D_SPLICE_ALIAS_RACE */
1177 /* Validate a dentry. Return 1 if unchanged, 0 if VFS layer should re-evaluate.
1178 * In kernels 2.2.10 and above, we are passed an additional flags var which
1179  * may have either the LOOKUP_FOLLOW or LOOKUP_DIRECTORY flag set, in which case
1180 * we are advised to follow the entry if it is a link or to make sure that
1181 * it is a directory. But since the kernel itself checks these possibilities
1182 * later on, we shouldn't have to do it until later. Perhaps in the future..
1184 * The code here assumes that on entry the global lock is not held
1186 static int
1187 #if defined(DOP_REVALIDATE_TAKES_UNSIGNED)
1188 afs_linux_dentry_revalidate(struct dentry *dp, unsigned int flags)
1189 #elif defined(DOP_REVALIDATE_TAKES_NAMEIDATA)
1190 afs_linux_dentry_revalidate(struct dentry *dp, struct nameidata *nd)
1191 #else
1192 afs_linux_dentry_revalidate(struct dentry *dp, int flags)
1193 #endif
1195 cred_t *credp = NULL;
1196 struct vcache *vcp, *pvcp, *tvc = NULL;
1197 struct dentry *parent;
1198 int valid;
1199 struct afs_fakestat_state fakestate;
1200 int force_drop = 0;
1201 afs_uint32 parent_dv;
1203 #ifdef LOOKUP_RCU
1204 /* We don't support RCU path walking */
1205 # if defined(DOP_REVALIDATE_TAKES_UNSIGNED)
1206 if (flags & LOOKUP_RCU)
1207 # else
1208 if (nd->flags & LOOKUP_RCU)
1209 # endif
1210 return -ECHILD;
1211 #endif
1213 #ifdef D_SPLICE_ALIAS_RACE
1214 if (check_dentry_race(dp)) {
1215 valid = 0;
1216 return valid;
1218 #endif
1220 AFS_GLOCK();
1221 afs_InitFakeStat(&fakestate);
1223 if (dp->d_inode) {
1224 vcp = VTOAFS(dp->d_inode);
1226 if (vcp == afs_globalVp)
1227 goto good_dentry;
1229 if (vcp->mvstat == AFS_MVSTAT_MTPT) {
1230 if (vcp->mvid.target_root && (vcp->f.states & CMValid)) {
1231 int tryEvalOnly = 0;
1232 int code = 0;
1233 struct vrequest *treq = NULL;
1235 credp = crref();
1237 code = afs_CreateReq(&treq, credp);
1238 if (code) {
1239 goto bad_dentry;
1241 if ((strcmp(dp->d_name.name, ".directory") == 0)) {
1242 tryEvalOnly = 1;
1244 if (tryEvalOnly)
1245 code = afs_TryEvalFakeStat(&vcp, &fakestate, treq);
1246 else
1247 code = afs_EvalFakeStat(&vcp, &fakestate, treq);
1248 afs_DestroyReq(treq);
1249 if ((tryEvalOnly && vcp->mvstat == AFS_MVSTAT_MTPT) || code) {
1250 /* a mount point, not yet replaced by its directory */
1251 goto bad_dentry;
1254 } else if (vcp->mvstat == AFS_MVSTAT_ROOT && *dp->d_name.name != '/') {
1255 osi_Assert(vcp->mvid.parent != NULL);
1258 #ifdef notdef
1259 /* If the last looker changes, we should make sure the current
1260 * looker still has permission to examine this file. This would
1261 * always require a crref() which would be "slow".
1263 if (vcp->last_looker != treq.uid) {
1264 if (!afs_AccessOK(vcp, (vType(vcp) == VREG) ? PRSFS_READ : PRSFS_LOOKUP, &treq, CHECK_MODE_BITS)) {
1265 goto bad_dentry;
1268 vcp->last_looker = treq.uid;
1270 #endif
1272 parent = dget_parent(dp);
1273 pvcp = VTOAFS(parent->d_inode);
1274 parent_dv = parent_vcache_dv(parent->d_inode, credp);
1276 /* If the parent's DataVersion has changed or the vnode
1277      * is no longer valid, we need to do a full lookup. VerifyVCache
1278 * isn't enough since the vnode may have been renamed.
1281 if (parent_dv > dp->d_time || !(vcp->f.states & CStatd)) {
1282 struct vattr *vattr = NULL;
1283 int code;
1284 int lookup_good;
1286 if (credp == NULL) {
1287 credp = crref();
1289 code = afs_lookup(pvcp, (char *)dp->d_name.name, &tvc, credp);
1291 if (code) {
1292 /* We couldn't perform the lookup, so we're not okay. */
1293 lookup_good = 0;
1295 } else if (tvc == vcp) {
1296 /* We got back the same vcache, so we're good. */
1297 lookup_good = 1;
1299 } else if (tvc == VTOAFS(dp->d_inode)) {
1300 /* We got back the same vcache, so we're good. This is
1301 * different from the above case, because sometimes 'vcp' is
1302 * not the same as the vcache for dp->d_inode, if 'vcp' was a
1303 * mtpt and we evaluated it to a root dir. In rare cases,
1304 		 * afs_lookup might not evaluate the mtpt when we do, or vice
1305 * versa, so the previous case will not succeed. But this is
1306 * still 'correct', so make sure not to mark the dentry as
1307 * invalid; it still points to the same thing! */
1308 lookup_good = 1;
1310 } else {
1311 /* We got back a different file, so we're definitely not
1312 * okay. */
1313 lookup_good = 0;
1316 if (!lookup_good) {
1317 dput(parent);
1318 /* Force unhash; the name doesn't point to this file
1319 * anymore. */
1320 force_drop = 1;
1321 if (code && code != ENOENT) {
1322 /* ...except if we couldn't perform the actual lookup,
1323 * we don't know if the name points to this file or not. */
1324 force_drop = 0;
1326 goto bad_dentry;
1329 code = afs_CreateAttr(&vattr);
1330 if (code) {
1331 dput(parent);
1332 goto bad_dentry;
1335 if (afs_getattr(vcp, vattr, credp)) {
1336 dput(parent);
1337 afs_DestroyAttr(vattr);
1338 goto bad_dentry;
1341 vattr2inode(AFSTOV(vcp), vattr);
1342 dp->d_time = parent_dv;
1344 afs_DestroyAttr(vattr);
1347 /* should we always update the attributes at this point? */
1348 /* unlikely--the vcache entry hasn't changed */
1350 dput(parent);
1352 } else {
1354 /* 'dp' represents a cached negative lookup. */
1356 parent = dget_parent(dp);
1357 pvcp = VTOAFS(parent->d_inode);
1358 parent_dv = parent_vcache_dv(parent->d_inode, credp);
1360 if (parent_dv > dp->d_time || !(pvcp->f.states & CStatd)
1361 || afs_IsDynroot(pvcp)) {
1362 dput(parent);
1363 goto bad_dentry;
1366 dput(parent);
1369 good_dentry:
1370 valid = 1;
1371 goto done;
1373 bad_dentry:
1374 valid = 0;
1375 #ifndef D_INVALIDATE_IS_VOID
1376 /* When (v3.18) d_invalidate was converted to void, it also started
1377 * being called automatically from revalidate, and automatically
1378 * handled:
1379 * - shrink_dcache_parent
1380 * - automatic detach of submounts
1381 * - d_drop
1382 * Therefore, after that point, OpenAFS revalidate logic no longer needs
1383 * to do any of those things itself for invalid dentry structs. We only need
1384 * to tell VFS it's invalid (by returning 0), and VFS will handle the rest.
1386 if (have_submounts(dp))
1387 valid = 1;
1388 #endif
1390 done:
1391 /* Clean up */
1392 if (tvc)
1393 afs_PutVCache(tvc);
1394 afs_PutFakeStat(&fakestate);
1395 AFS_GUNLOCK();
1396 if (credp)
1397 crfree(credp);
1399 #ifndef D_INVALIDATE_IS_VOID
1400 if (!valid) {
1402 * If we had a negative lookup for the name we want to forcibly
1403 * unhash the dentry.
1404 * Otherwise use d_invalidate which will not unhash it if still in use.
1406 if (force_drop) {
1407 shrink_dcache_parent(dp);
1408 d_drop(dp);
1409 } else
1410 d_invalidate(dp);
1412 #endif
1413 return valid;
1417 static void
1418 afs_dentry_iput(struct dentry *dp, struct inode *ip)
1420 struct vcache *vcp = VTOAFS(ip);
1422 AFS_GLOCK();
1423 if (!AFS_IS_DISCONNECTED || (vcp->f.states & CUnlinked)) {
1424 (void) afs_InactiveVCache(vcp, NULL);
1426 AFS_GUNLOCK();
1427 afs_linux_clear_nfsfs_renamed(dp);
1429 iput(ip);
1432 static int
1433 #if defined(DOP_D_DELETE_TAKES_CONST)
1434 afs_dentry_delete(const struct dentry *dp)
1435 #else
1436 afs_dentry_delete(struct dentry *dp)
1437 #endif
1439 if (dp->d_inode && (VTOAFS(dp->d_inode)->f.states & CUnlinked))
1440 return 1; /* bad inode? */
1442 return 0;
1445 #ifdef STRUCT_DENTRY_OPERATIONS_HAS_D_AUTOMOUNT
1446 static struct vfsmount *
1447 afs_dentry_automount(afs_linux_path_t *path)
1449 struct dentry *target;
1452 * Avoid symlink resolution limits when resolving; we cannot contribute to
1453 * an infinite symlink loop.
1455 * On newer kernels the field has moved to the private nameidata structure
1456 * so we can't adjust it here. This may cause ELOOP when using a path with
1457 * 40 or more directories that are not already in the dentry cache.
1459 #if defined(STRUCT_TASK_STRUCT_HAS_TOTAL_LINK_COUNT)
1460 current->total_link_count--;
1461 #endif
1463 target = canonical_dentry(path->dentry->d_inode);
1465 if (target == path->dentry) {
1466 dput(target);
1467 target = NULL;
1470 if (target) {
1471 dput(path->dentry);
1472 path->dentry = target;
1474 } else {
1475 spin_lock(&path->dentry->d_lock);
1476 path->dentry->d_flags &= ~DCACHE_NEED_AUTOMOUNT;
1477 spin_unlock(&path->dentry->d_lock);
1480 return NULL;
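/* No vfsmount is ever constructed here: the function either swaps
 * path->dentry for the canonical alias of the inode, or clears
 * DCACHE_NEED_AUTOMOUNT so the same dentry is not treated as an automount
 * trigger again. */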
1482 #endif /* STRUCT_DENTRY_OPERATIONS_HAS_D_AUTOMOUNT */
1484 struct dentry_operations afs_dentry_operations = {
1485 .d_revalidate = afs_linux_dentry_revalidate,
1486 .d_delete = afs_dentry_delete,
1487 .d_iput = afs_dentry_iput,
1488 #ifdef STRUCT_DENTRY_OPERATIONS_HAS_D_AUTOMOUNT
1489 .d_automount = afs_dentry_automount,
1490 #endif /* STRUCT_DENTRY_OPERATIONS_HAS_D_AUTOMOUNT */
1493 /**********************************************************************
1494 * AFS Linux inode operations
1495 **********************************************************************/
1497 /* afs_linux_create
1499 * Merely need to set enough of vattr to get us through the create. Note
1500  * that the higher level code (open_namei) will take care of any truncation
1501 * explicitly. Exclusive open is also taken care of in open_namei.
1503 * name is in kernel space at this point.
1505 static int
1506 #if defined(IOP_CREATE_TAKES_BOOL)
1507 afs_linux_create(struct inode *dip, struct dentry *dp, umode_t mode,
1508 bool excl)
1509 #elif defined(IOP_CREATE_TAKES_UMODE_T)
1510 afs_linux_create(struct inode *dip, struct dentry *dp, umode_t mode,
1511 struct nameidata *nd)
1512 #elif defined(IOP_CREATE_TAKES_NAMEIDATA)
1513 afs_linux_create(struct inode *dip, struct dentry *dp, int mode,
1514 struct nameidata *nd)
1515 #else
1516 afs_linux_create(struct inode *dip, struct dentry *dp, int mode)
1517 #endif
1519 struct vattr *vattr = NULL;
1520 cred_t *credp = crref();
1521 const char *name = dp->d_name.name;
1522 struct vcache *vcp;
1523 int code;
1525 AFS_GLOCK();
1527 code = afs_CreateAttr(&vattr);
1528 if (code) {
1529 goto out;
1531 vattr->va_mode = mode;
1532 vattr->va_type = mode & S_IFMT;
1534 code = afs_create(VTOAFS(dip), (char *)name, vattr, NONEXCL, mode,
1535 &vcp, credp);
1537 if (!code) {
1538 struct inode *ip = AFSTOV(vcp);
1540 afs_getattr(vcp, vattr, credp);
1541 afs_fill_inode(ip, vattr);
1542 insert_inode_hash(ip);
1543 #if !defined(STRUCT_SUPER_BLOCK_HAS_S_D_OP)
1544 dp->d_op = &afs_dentry_operations;
1545 #endif
1546 dp->d_time = parent_vcache_dv(dip, credp);
1547 d_instantiate(dp, ip);
1550 afs_DestroyAttr(vattr);
1552 out:
1553 AFS_GUNLOCK();
1555 crfree(credp);
1556 return afs_convert_code(code);
1559 /* afs_linux_lookup */
1560 static struct dentry *
1561 #if defined(IOP_LOOKUP_TAKES_UNSIGNED)
1562 afs_linux_lookup(struct inode *dip, struct dentry *dp,
1563 unsigned flags)
1564 #elif defined(IOP_LOOKUP_TAKES_NAMEIDATA)
1565 afs_linux_lookup(struct inode *dip, struct dentry *dp,
1566 struct nameidata *nd)
1567 #else
1568 afs_linux_lookup(struct inode *dip, struct dentry *dp)
1569 #endif
1571 cred_t *credp = crref();
1572 struct vcache *vcp = NULL;
1573 const char *comp = dp->d_name.name;
1574 struct inode *ip = NULL;
1575 struct dentry *newdp = NULL;
1576 int code;
1578 AFS_GLOCK();
1580 code = afs_lookup(VTOAFS(dip), (char *)comp, &vcp, credp);
1581 if (code == ENOENT) {
1582 /* It's ok for the file to not be found. That's noted by the caller by
1583 * seeing that the dp->d_inode field is NULL (set by d_splice_alias or
1584 * d_add, below). */
1585 code = 0;
1586 osi_Assert(vcp == NULL);
1588 if (code) {
1589 AFS_GUNLOCK();
1590 goto done;
1593 if (vcp) {
1594 struct vattr *vattr = NULL;
1595 struct vcache *parent_vc = VTOAFS(dip);
1597 if (parent_vc == vcp) {
1598 /* This is possible if the parent dir is a mountpoint to a volume,
1599 * and the dir entry we looked up is a mountpoint to the same
1600 * volume. Linux cannot cope with this, so return an error instead
1601 * of risking a deadlock or panic. */
1602 afs_PutVCache(vcp);
1603 code = EDEADLK;
1604 AFS_GUNLOCK();
1605 goto done;
1608 code = afs_CreateAttr(&vattr);
1609 if (code) {
1610 afs_PutVCache(vcp);
1611 AFS_GUNLOCK();
1612 goto done;
1615 ip = AFSTOV(vcp);
1616 afs_getattr(vcp, vattr, credp);
1617 afs_fill_inode(ip, vattr);
1618 if (hlist_unhashed(&ip->i_hash))
1619 insert_inode_hash(ip);
1621 afs_DestroyAttr(vattr);
1623 #if !defined(STRUCT_SUPER_BLOCK_HAS_S_D_OP)
1624 dp->d_op = &afs_dentry_operations;
1625 #endif
1626 dp->d_time = parent_vcache_dv(dip, credp);
1628 AFS_GUNLOCK();
1630 if (ip && S_ISDIR(ip->i_mode)) {
1631 d_prune_aliases(ip);
1633 #ifdef STRUCT_DENTRY_OPERATIONS_HAS_D_AUTOMOUNT
1634 /* Only needed if this is a volume root */
1635 if (vcp->mvstat == 2)
1636 ip->i_flags |= S_AUTOMOUNT;
1637 #endif
1640 * Take an extra reference so the inode doesn't go away if
1641 * d_splice_alias drops our reference on error.
1643 if (ip)
1644 #ifdef HAVE_LINUX_IHOLD
1645 ihold(ip);
1646 #else
1647 igrab(ip);
1648 #endif
1650 newdp = d_splice_alias(ip, dp);
1652 done:
1653 crfree(credp);
1655 if (IS_ERR(newdp)) {
1656 /* d_splice_alias can return an error (EIO) if there is an existing
1657 	 * connected directory alias for this dentry. If that happens, add our
1658 	 * dentry manually. */
1659 d_add(dp, ip);
1661 #if defined(D_SPLICE_ALIAS_LEAK_ON_ERROR)
1662 /* Depending on the kernel version, d_splice_alias may or may not drop
1663 * the inode reference on error. If it didn't, do it here. */
1664 iput(ip);
1665 #endif
1666 return NULL;
1669 if (code) {
1670 if (ip)
1671 iput(ip);
1672 return ERR_PTR(afs_convert_code(code));
1675 iput(ip);
1676 return newdp;
1679 static int
1680 afs_linux_link(struct dentry *olddp, struct inode *dip, struct dentry *newdp)
1682 int code;
1683 cred_t *credp = crref();
1684 const char *name = newdp->d_name.name;
1685 struct inode *oldip = olddp->d_inode;
1687 /* If afs_link returned the vnode, we could instantiate the
1688 * dentry. Since it's not, we drop this one and do a new lookup.
1690 d_drop(newdp);
1692 AFS_GLOCK();
1693 code = afs_link(VTOAFS(oldip), VTOAFS(dip), (char *)name, credp);
1695 AFS_GUNLOCK();
1696 crfree(credp);
1697 return afs_convert_code(code);
1700 /* We have to have a Linux specific sillyrename function, because we
1701 * also have to keep the dcache up to date when we're doing a silly
1702 * rename - so we don't want the generic vnodeops doing this behind our
1703 * back.
1706 static int
1707 afs_linux_sillyrename(struct inode *dir, struct dentry *dentry,
1708 cred_t *credp)
1710 struct vcache *tvc = VTOAFS(dentry->d_inode);
1711 struct dentry *__dp = NULL;
1712 char *__name = NULL;
1713 int code;
1715 if (afs_linux_nfsfs_renamed(dentry))
1716 return EBUSY;
1718 do {
1719 dput(__dp);
1721 AFS_GLOCK();
1722 if (__name)
1723 osi_FreeSmallSpace(__name);
1724 __name = afs_newname();
1725 AFS_GUNLOCK();
1727 __dp = lookup_one_len(__name, dentry->d_parent, strlen(__name));
1729 if (IS_ERR(__dp)) {
1730 osi_FreeSmallSpace(__name);
1731 return EBUSY;
1733 } while (__dp->d_inode != NULL);
1735 AFS_GLOCK();
1736 code = afs_rename(VTOAFS(dir), (char *)dentry->d_name.name,
1737 VTOAFS(dir), (char *)__dp->d_name.name,
1738 credp);
1739 if (!code) {
1740 tvc->mvid.silly_name = __name;
1741 crhold(credp);
1742 if (tvc->uncred) {
1743 crfree(tvc->uncred);
1745 tvc->uncred = credp;
1746 tvc->f.states |= CUnlinked;
1747 afs_linux_set_nfsfs_renamed(dentry);
1749 __dp->d_time = 0; /* force to revalidate */
1750 d_move(dentry, __dp);
1751 } else {
1752 osi_FreeSmallSpace(__name);
1754 AFS_GUNLOCK();
1756 dput(__dp);
1758 return code;
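/* The do/while loop above keeps generating candidate names with
 * afs_newname() until lookup_one_len() returns a dentry with no inode
 * attached, i.e. a name that does not already exist in the parent
 * directory; only then is the rename to that sillyrename target attempted. */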
1762 static int
1763 afs_linux_unlink(struct inode *dip, struct dentry *dp)
1765 int code = EBUSY;
1766 cred_t *credp = crref();
1767 const char *name = dp->d_name.name;
1768 struct vcache *tvc = VTOAFS(dp->d_inode);
1770 if (VREFCOUNT(tvc) > 1 && tvc->opens > 0
1771 && !(tvc->f.states & CUnlinked)) {
1773 code = afs_linux_sillyrename(dip, dp, credp);
1774 } else {
1775 AFS_GLOCK();
1776 code = afs_remove(VTOAFS(dip), (char *)name, credp);
1777 AFS_GUNLOCK();
1778 if (!code)
1779 d_drop(dp);
1782 crfree(credp);
1783 return afs_convert_code(code);
1787 static int
1788 afs_linux_symlink(struct inode *dip, struct dentry *dp, const char *target)
1790 int code;
1791 cred_t *credp = crref();
1792 struct vattr *vattr = NULL;
1793 const char *name = dp->d_name.name;
1795 /* If afs_symlink returned the vnode, we could instantiate the
1796 * dentry. Since it's not, we drop this one and do a new lookup.
1798 d_drop(dp);
1800 AFS_GLOCK();
1801 code = afs_CreateAttr(&vattr);
1802 if (code) {
1803 goto out;
1806 code = afs_symlink(VTOAFS(dip), (char *)name, vattr, (char *)target, NULL,
1807 credp);
1808 afs_DestroyAttr(vattr);
1810 out:
1811 AFS_GUNLOCK();
1812 crfree(credp);
1813 return afs_convert_code(code);
1816 static int
1817 #if defined(IOP_MKDIR_TAKES_UMODE_T)
1818 afs_linux_mkdir(struct inode *dip, struct dentry *dp, umode_t mode)
1819 #else
1820 afs_linux_mkdir(struct inode *dip, struct dentry *dp, int mode)
1821 #endif
1823 int code;
1824 cred_t *credp = crref();
1825 struct vcache *tvcp = NULL;
1826 struct vattr *vattr = NULL;
1827 const char *name = dp->d_name.name;
1829 AFS_GLOCK();
1830 code = afs_CreateAttr(&vattr);
1831 if (code) {
1832 goto out;
1835 vattr->va_mask = ATTR_MODE;
1836 vattr->va_mode = mode;
1838 code = afs_mkdir(VTOAFS(dip), (char *)name, vattr, &tvcp, credp);
1840 if (tvcp) {
1841 struct inode *ip = AFSTOV(tvcp);
1843 afs_getattr(tvcp, vattr, credp);
1844 afs_fill_inode(ip, vattr);
1846 #if !defined(STRUCT_SUPER_BLOCK_HAS_S_D_OP)
1847 dp->d_op = &afs_dentry_operations;
1848 #endif
1849 dp->d_time = parent_vcache_dv(dip, credp);
1850 d_instantiate(dp, ip);
1852 afs_DestroyAttr(vattr);
1854 out:
1855 AFS_GUNLOCK();
1857 crfree(credp);
1858 return afs_convert_code(code);
1861 static int
1862 afs_linux_rmdir(struct inode *dip, struct dentry *dp)
1864 int code;
1865 cred_t *credp = crref();
1866 const char *name = dp->d_name.name;
1868 /* locking kernel conflicts with glock? */
1870 AFS_GLOCK();
1871 code = afs_rmdir(VTOAFS(dip), (char *)name, credp);
1872 AFS_GUNLOCK();
1874 /* Linux likes to see ENOTEMPTY returned from an rmdir() syscall
1875 * that failed because a directory is not empty. So, we map
1876      * EEXIST to ENOTEMPTY on Linux.
1878 if (code == EEXIST) {
1879 code = ENOTEMPTY;
1882 if (!code) {
1883 d_drop(dp);
1886 crfree(credp);
1887 return afs_convert_code(code);
1891 static int
1892 afs_linux_rename(struct inode *oldip, struct dentry *olddp,
1893 struct inode *newip, struct dentry *newdp
1894 #ifdef HAVE_LINUX_INODE_OPERATIONS_RENAME_TAKES_FLAGS
1895 , unsigned int flags
1896 #endif
1899 int code;
1900 cred_t *credp = crref();
1901 const char *oldname = olddp->d_name.name;
1902 const char *newname = newdp->d_name.name;
1903 struct dentry *rehash = NULL;
1905 #ifdef HAVE_LINUX_INODE_OPERATIONS_RENAME_TAKES_FLAGS
1906 if (flags)
1907 return -EINVAL; /* no support for new flags yet */
1908 #endif
1910 /* Prevent any new references during rename operation. */
1912 if (!d_unhashed(newdp)) {
1913 d_drop(newdp);
1914 rehash = newdp;
1917 afs_maybe_shrink_dcache(olddp);
1919 AFS_GLOCK();
1920 code = afs_rename(VTOAFS(oldip), (char *)oldname, VTOAFS(newip), (char *)newname, credp);
1921 AFS_GUNLOCK();
1923 if (!code)
1924 olddp->d_time = 0; /* force to revalidate */
1926 if (rehash)
1927 d_rehash(rehash);
1929 crfree(credp);
1930 return afs_convert_code(code);
1934 /* afs_linux_ireadlink
1935 * Internal readlink which can return link contents to user or kernel space.
1936 * Note that the buffer is NOT supposed to be null-terminated.
1938 static int
1939 afs_linux_ireadlink(struct inode *ip, char *target, int maxlen, uio_seg_t seg)
1941 int code;
1942 cred_t *credp = crref();
1943 struct uio tuio;
1944 struct iovec iov;
1946 memset(&tuio, 0, sizeof(tuio));
1947 memset(&iov, 0, sizeof(iov));
1949 setup_uio(&tuio, &iov, target, (afs_offs_t) 0, maxlen, UIO_READ, seg);
1950 code = afs_readlink(VTOAFS(ip), &tuio, credp);
1951 crfree(credp);
1953 if (!code)
1954 return maxlen - tuio.uio_resid;
1955 else
1956 return afs_convert_code(code);
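/*
 * On success the value returned above is the number of bytes actually read
 * into 'target' (maxlen minus the residual uio count); on failure it is the
 * negative errno produced by afs_convert_code().
 */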
1959 #if !defined(USABLE_KERNEL_PAGE_SYMLINK_CACHE)
1960 /* afs_linux_readlink
1961 * Fill target (which is in user space) with contents of symlink.
1963 static int
1964 afs_linux_readlink(struct dentry *dp, char *target, int maxlen)
1966 int code;
1967 struct inode *ip = dp->d_inode;
1969 AFS_GLOCK();
1970 code = afs_linux_ireadlink(ip, target, maxlen, AFS_UIOUSER);
1971 AFS_GUNLOCK();
1972 return code;
1976 /* afs_linux_follow_link
1977 * a file system dependent link following routine.
1979 #if defined(HAVE_LINUX_INODE_OPERATIONS_FOLLOW_LINK_NO_NAMEIDATA)
1980 static const char *afs_linux_follow_link(struct dentry *dentry, void **link_data)
1981 #else
1982 static int afs_linux_follow_link(struct dentry *dentry, struct nameidata *nd)
1983 #endif
1985 int code;
1986 char *name;
1988 name = kmalloc(PATH_MAX, GFP_NOFS);
1989 if (!name) {
1990 #if defined(HAVE_LINUX_INODE_OPERATIONS_FOLLOW_LINK_NO_NAMEIDATA)
1991 return ERR_PTR(-EIO);
1992 #else
1993 return -EIO;
1994 #endif
1997 AFS_GLOCK();
1998 code = afs_linux_ireadlink(dentry->d_inode, name, PATH_MAX - 1, AFS_UIOSYS);
1999 AFS_GUNLOCK();
2001 if (code < 0) {
2002 #if defined(HAVE_LINUX_INODE_OPERATIONS_FOLLOW_LINK_NO_NAMEIDATA)
2003 return ERR_PTR(code);
2004 #else
2005 return code;
2006 #endif
2009 name[code] = '\0';
2010 #if defined(HAVE_LINUX_INODE_OPERATIONS_FOLLOW_LINK_NO_NAMEIDATA)
2011 return *link_data = name;
2012 #else
2013 nd_set_link(nd, name);
2014 return 0;
2015 #endif
2018 #if defined(HAVE_LINUX_INODE_OPERATIONS_PUT_LINK_NO_NAMEIDATA)
2019 static void
2020 afs_linux_put_link(struct inode *inode, void *link_data)
2022 char *name = link_data;
2024 if (name && !IS_ERR(name))
2025 kfree(name);
2027 #else
2028 static void
2029 afs_linux_put_link(struct dentry *dentry, struct nameidata *nd)
2031 char *name = nd_get_link(nd);
2033 if (name && !IS_ERR(name))
2034 kfree(name);
2036 #endif /* HAVE_LINUX_INODE_OPERATIONS_PUT_LINK_NO_NAMEIDATA */
2038 #endif /* USABLE_KERNEL_PAGE_SYMLINK_CACHE */
2040 /* Populate a page by filling it from the cache file pointed at by cachefp
2041 * (which contains indicated chunk)
2042  * If task is NULL, the page copy occurs synchronously, and the routine
2043 * returns with page still locked. If task is non-NULL, then page copies
2044 * may occur in the background, and the page will be unlocked when it is
2045 * ready for use.
2047 static int
2048 afs_linux_read_cache(struct file *cachefp, struct page *page,
2049 int chunk, struct pagevec *lrupv,
2050 struct afs_pagecopy_task *task) {
2051 loff_t offset = page_offset(page);
2052 struct inode *cacheinode = cachefp->f_dentry->d_inode;
2053 struct page *newpage, *cachepage;
2054 struct address_space *cachemapping;
2055 int pageindex;
2056 int code = 0;
2058 cachemapping = cacheinode->i_mapping;
2059 newpage = NULL;
2060 cachepage = NULL;
2062 /* If we're trying to read a page that's past the end of the disk
2063 * cache file, then just return a zeroed page */
2064 if (AFS_CHUNKOFFSET(offset) >= i_size_read(cacheinode)) {
2065 zero_user_segment(page, 0, PAGE_SIZE);
2066 SetPageUptodate(page);
2067 if (task)
2068 unlock_page(page);
2069 return 0;
2072 /* From our offset, we now need to work out which page in the disk
2073 * file it corresponds to. This will be fun ... */
2074 pageindex = (offset - AFS_CHUNKTOBASE(chunk)) >> PAGE_SHIFT;
2076 while (cachepage == NULL) {
2077 cachepage = find_get_page(cachemapping, pageindex);
2078 if (!cachepage) {
2079 if (!newpage)
2080 newpage = page_cache_alloc_cold(cachemapping);
2081 if (!newpage) {
2082 code = -ENOMEM;
2083 goto out;
2086 code = add_to_page_cache(newpage, cachemapping,
2087 pageindex, GFP_KERNEL);
2088 if (code == 0) {
2089 cachepage = newpage;
2090 newpage = NULL;
2092 get_page(cachepage);
2093 if (!pagevec_add(lrupv, cachepage))
2094 __pagevec_lru_add_file(lrupv);
2096 } else {
2097 put_page(newpage);
2098 newpage = NULL;
2099 if (code != -EEXIST)
2100 goto out;
2102 } else {
2103 lock_page(cachepage);
2107 if (!PageUptodate(cachepage)) {
2108 ClearPageError(cachepage);
2109 code = cachemapping->a_ops->readpage(NULL, cachepage);
2110 if (!code && !task) {
2111 wait_on_page_locked(cachepage);
2113 } else {
2114 unlock_page(cachepage);
2117 if (!code) {
2118 if (PageUptodate(cachepage)) {
2119 copy_highpage(page, cachepage);
2120 flush_dcache_page(page);
2121 SetPageUptodate(page);
2123 if (task)
2124 unlock_page(page);
2125 } else if (task) {
2126 afs_pagecopy_queue_page(task, cachepage, page);
2127 } else {
2128 code = -EIO;
2132 if (code && task) {
2133 unlock_page(page);
2136 out:
2137 if (cachepage)
2138 put_page(cachepage);
2140 return code;
2143 static int inline
2144 afs_linux_readpage_fastpath(struct file *fp, struct page *pp, int *codep)
2146 loff_t offset = page_offset(pp);
2147 struct inode *ip = FILE_INODE(fp);
2148 struct vcache *avc = VTOAFS(ip);
2149 struct dcache *tdc;
2150 struct file *cacheFp = NULL;
2151 int code;
2152 int dcLocked = 0;
2153 struct pagevec lrupv;
2155 /* Not a UFS cache, don't do anything */
2156 if (cacheDiskType != AFS_FCACHE_TYPE_UFS)
2157 return 0;
2159 /* No readpage (e.g. tmpfs); skip */
2160 if (cachefs_noreadpage)
2161 return 0;
2163 /* Can't do anything if the vcache isn't statd, or if the read
2164 * crosses a chunk boundary.
2166 if (!(avc->f.states & CStatd) ||
2167 AFS_CHUNK(offset) != AFS_CHUNK(offset + PAGE_SIZE)) {
2168 return 0;
2171 ObtainWriteLock(&avc->lock, 911);
2173 /* XXX - See if hinting actually makes things faster !!! */
2175 /* See if we have a suitable entry already cached */
2176 tdc = avc->dchint;
2178 if (tdc) {
2179 /* We need to lock xdcache, then dcache, to handle situations where
2180 * the hint is on the free list. However, we can't safely do this
2181 * according to the locking hierarchy. So, use a non-blocking lock.
2183 ObtainReadLock(&afs_xdcache);
2184 dcLocked = ( 0 == NBObtainReadLock(&tdc->lock));
2186 if (dcLocked && (tdc->index != NULLIDX)
2187 && !FidCmp(&tdc->f.fid, &avc->f.fid)
2188 && tdc->f.chunk == AFS_CHUNK(offset)
2189 && !(afs_indexFlags[tdc->index] & (IFFree | IFDiscarded))) {
2190 /* Bonus - the hint was correct */
2191 afs_RefDCache(tdc);
2192 } else {
2193 /* Only destroy the hint if it's actually invalid, not if there's
2194 * just been a locking failure */
2195 if (dcLocked) {
2196 ReleaseReadLock(&tdc->lock);
2197 avc->dchint = NULL;
2200 tdc = NULL;
2201 dcLocked = 0;
2203 ReleaseReadLock(&afs_xdcache);
2206 /* No hint, or hint is no longer valid - see if we can get something
2207 * directly from the dcache
2209 if (!tdc)
2210 tdc = afs_FindDCache(avc, offset);
2212 if (!tdc) {
2213 ReleaseWriteLock(&avc->lock);
2214 return 0;
2217 if (!dcLocked)
2218 ObtainReadLock(&tdc->lock);
2220 /* Is the dcache we've been given currently up to date?
2221 if (!hsame(avc->f.m.DataVersion, tdc->f.versionNo) ||
2222 (tdc->dflags & DFFetching))
2223 goto out;
2225 /* Update our hint for future abuse */
2226 avc->dchint = tdc;
2228 /* Okay, so we've now got a cache file that is up to date */
2230 /* XXX - I suspect we should be locking the inodes before we use them! */
2231 AFS_GUNLOCK();
2232 cacheFp = afs_linux_raw_open(&tdc->f.inode);
2233 if (!cacheFp->f_dentry->d_inode->i_mapping->a_ops->readpage) {
2234 cachefs_noreadpage = 1;
2235 AFS_GLOCK();
2236 goto out;
2238 pagevec_init(&lrupv, 0);
2240 code = afs_linux_read_cache(cacheFp, pp, tdc->f.chunk, &lrupv, NULL);
2242 if (pagevec_count(&lrupv))
2243 __pagevec_lru_add_file(&lrupv);
2245 filp_close(cacheFp, NULL);
2246 AFS_GLOCK();
2248 ReleaseReadLock(&tdc->lock);
2249 ReleaseWriteLock(&avc->lock);
2250 afs_PutDCache(tdc);
2252 *codep = code;
2253 return 1;
2255 out:
2256 ReleaseWriteLock(&avc->lock);
2257 ReleaseReadLock(&tdc->lock);
2258 afs_PutDCache(tdc);
2259 return 0;
2262 /* afs_linux_readpage
2264 * This function is split into two, because prepare_write/begin_write
2265 * require a readpage call which doesn't unlock the resulting page upon
2266 * success.
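/* A minimal sketch of how the two halves are combined (mirroring
 * afs_linux_readpage and afs_linux_prepare_write further down):
 *
 *     // the readpage path: fill, prefetch, then unlock
 *     code = afs_linux_fillpage(fp, pp);
 *     if (!code)
 *         code = afs_linux_prefetch(fp, pp);
 *     unlock_page(pp);
 *
 *     // the prepare_write path: fill only; the caller keeps the page locked
 *     afs_linux_fillpage(file, page);
 */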
2268 static int
2269 afs_linux_fillpage(struct file *fp, struct page *pp)
2271 afs_int32 code;
2272 char *address;
2273 struct uio *auio;
2274 struct iovec *iovecp;
2275 struct inode *ip = FILE_INODE(fp);
2276 afs_int32 cnt = page_count(pp);
2277 struct vcache *avc = VTOAFS(ip);
2278 afs_offs_t offset = page_offset(pp);
2279 cred_t *credp;
2281 AFS_GLOCK();
2282 if (afs_linux_readpage_fastpath(fp, pp, &code)) {
2283 AFS_GUNLOCK();
2284 return code;
2286 AFS_GUNLOCK();
2288 credp = crref();
2289 address = kmap(pp);
2290 ClearPageError(pp);
2292 auio = kmalloc(sizeof(struct uio), GFP_NOFS);
2293 iovecp = kmalloc(sizeof(struct iovec), GFP_NOFS);
2295 setup_uio(auio, iovecp, (char *)address, offset, PAGE_SIZE, UIO_READ,
2296 AFS_UIOSYS);
2298 AFS_GLOCK();
2299 AFS_DISCON_LOCK();
2300 afs_Trace4(afs_iclSetp, CM_TRACE_READPAGE, ICL_TYPE_POINTER, ip,
2301 ICL_TYPE_POINTER, pp, ICL_TYPE_INT32, cnt, ICL_TYPE_INT32,
2302 99999); /* not a possible code value */
2304 code = afs_rdwr(avc, auio, UIO_READ, 0, credp);
2306 afs_Trace4(afs_iclSetp, CM_TRACE_READPAGE, ICL_TYPE_POINTER, ip,
2307 ICL_TYPE_POINTER, pp, ICL_TYPE_INT32, cnt, ICL_TYPE_INT32,
2308 code);
2309 AFS_DISCON_UNLOCK();
2310 AFS_GUNLOCK();
2311 if (!code) {
2312 /* XXX valid for no-cache also? Check last bits of files... :)
2313 * Cognate code goes in afs_NoCacheFetchProc. */
2314 if (auio->uio_resid) /* zero remainder of page */
2315 memset((void *)(address + (PAGE_SIZE - auio->uio_resid)), 0,
2316 auio->uio_resid);
2318 flush_dcache_page(pp);
2319 SetPageUptodate(pp);
2320 } /* !code */
2322 kunmap(pp);
2324 kfree(auio);
2325 kfree(iovecp);
2327 crfree(credp);
2328 return afs_convert_code(code);
2331 static int
2332 afs_linux_prefetch(struct file *fp, struct page *pp)
2334 int code = 0;
2335 struct vcache *avc = VTOAFS(FILE_INODE(fp));
2336 afs_offs_t offset = page_offset(pp);
2338 if (AFS_CHUNKOFFSET(offset) == 0) {
2339 struct dcache *tdc;
2340 struct vrequest *treq = NULL;
2341 cred_t *credp;
2343 credp = crref();
2344 AFS_GLOCK();
2345 code = afs_CreateReq(&treq, credp);
2346 if (!code && !NBObtainWriteLock(&avc->lock, 534)) {
2347 tdc = afs_FindDCache(avc, offset);
2348 if (tdc) {
2349 if (!(tdc->mflags & DFNextStarted))
2350 afs_PrefetchChunk(avc, tdc, credp, treq);
2351 afs_PutDCache(tdc);
2353 ReleaseWriteLock(&avc->lock);
2355 afs_DestroyReq(treq);
2356 AFS_GUNLOCK();
2357 crfree(credp);
2359 return afs_convert_code(code);
2363 static int
2364 afs_linux_bypass_readpages(struct file *fp, struct address_space *mapping,
2365 struct list_head *page_list, unsigned num_pages)
2367 afs_int32 page_ix;
2368 struct uio *auio;
2369 afs_offs_t offset;
2370 struct iovec* iovecp;
2371 struct nocache_read_request *ancr;
2372 struct page *pp;
2373 struct pagevec lrupv;
2374 afs_int32 code = 0;
2376 cred_t *credp;
2377 struct inode *ip = FILE_INODE(fp);
2378 struct vcache *avc = VTOAFS(ip);
2379 afs_int32 base_index = 0;
2380 afs_int32 page_count = 0;
2381 afs_int32 isize;
2383 /* background thread must free: iovecp, auio, ancr */
2384 iovecp = osi_Alloc(num_pages * sizeof(struct iovec));
2386 auio = osi_Alloc(sizeof(struct uio));
2387 auio->uio_iov = iovecp;
2388 auio->uio_iovcnt = num_pages;
2389 auio->uio_flag = UIO_READ;
2390 auio->uio_seg = AFS_UIOSYS;
2391 auio->uio_resid = num_pages * PAGE_SIZE;
2393 ancr = osi_Alloc(sizeof(struct nocache_read_request));
2394 ancr->auio = auio;
2395 ancr->offset = auio->uio_offset;
2396 ancr->length = auio->uio_resid;
2398 pagevec_init(&lrupv, 0);
2400 for(page_ix = 0; page_ix < num_pages; ++page_ix) {
2402 if(list_empty(page_list))
2403 break;
2405 pp = list_entry(page_list->prev, struct page, lru);
2406 /* If we allocate a page and don't remove it from page_list,
2407 * the page cache gets upset. */
2408 list_del(&pp->lru);
2409 isize = (i_size_read(fp->f_mapping->host) - 1) >> PAGE_SHIFT;
2410 if(pp->index > isize) {
2411 if(PageLocked(pp))
2412 unlock_page(pp);
2413 continue;
2416 if(page_ix == 0) {
2417 offset = page_offset(pp);
2418 ancr->offset = auio->uio_offset = offset;
2419 base_index = pp->index;
2421 iovecp[page_ix].iov_len = PAGE_SIZE;
2422 code = add_to_page_cache(pp, mapping, pp->index, GFP_KERNEL);
2423 if(base_index != pp->index) {
2424 if(PageLocked(pp))
2425 unlock_page(pp);
2426 put_page(pp);
2427 iovecp[page_ix].iov_base = (void *) 0;
2428 base_index++;
2429 ancr->length -= PAGE_SIZE;
2430 continue;
2432 base_index++;
2433 if(code) {
2434 if(PageLocked(pp))
2435 unlock_page(pp);
2436 put_page(pp);
2437 iovecp[page_ix].iov_base = (void *) 0;
2438 } else {
2439 page_count++;
2440 if(!PageLocked(pp)) {
2441 lock_page(pp);
2444 /* increment page refcount--our original design assumed
2445 * that locking it would effectively pin it; protect
2446 * ourselves from the possibility that this assumption
2447 * is faulty, at low cost (provided we do not fail to
2448 * do the corresponding decref on the other side) */
2449 get_page(pp);
2451 /* save the page for background map */
2452 iovecp[page_ix].iov_base = (void*) pp;
2454 /* and put it on the LRU cache */
2455 if (!pagevec_add(&lrupv, pp))
2456 __pagevec_lru_add_file(&lrupv);
2460 /* If there were useful pages in the page list, make sure all pages
2461 * are in the LRU cache, then schedule the read */
2462 if(page_count) {
2463 if (pagevec_count(&lrupv))
2464 __pagevec_lru_add_file(&lrupv);
2465 credp = crref();
2466 code = afs_ReadNoCache(avc, ancr, credp);
2467 crfree(credp);
2468 } else {
2469 /* If there is nothing for the background thread to handle,
2470 * it won't be freeing the things that we never gave it */
2471 osi_Free(iovecp, num_pages * sizeof(struct iovec));
2472 osi_Free(auio, sizeof(struct uio));
2473 osi_Free(ancr, sizeof(struct nocache_read_request));
2475 /* we do not flush, release, or unmap pages--that will be
2476 * done for us by the background thread as each page comes in
2477 * from the fileserver */
2478 return afs_convert_code(code);
2482 static int
2483 afs_linux_bypass_readpage(struct file *fp, struct page *pp)
2485 cred_t *credp = NULL;
2486 struct uio *auio;
2487 struct iovec *iovecp;
2488 struct nocache_read_request *ancr;
2489 int code;
2492 * Special case: if page is at or past end of file, just zero it and set
2493 * it as up to date.
2495 if (page_offset(pp) >= i_size_read(fp->f_mapping->host)) {
2496 zero_user_segment(pp, 0, PAGE_SIZE);
2497 SetPageUptodate(pp);
2498 unlock_page(pp);
2499 return 0;
2502 ClearPageError(pp);
2504 /* receiver frees */
2505 auio = osi_Alloc(sizeof(struct uio));
2506 iovecp = osi_Alloc(sizeof(struct iovec));
2508 /* address can be NULL, because we overwrite it with 'pp', below */
2509 setup_uio(auio, iovecp, NULL, page_offset(pp),
2510 PAGE_SIZE, UIO_READ, AFS_UIOSYS);
2512 /* save the page for background map */
2513 get_page(pp); /* see above */
2514 auio->uio_iov->iov_base = (void*) pp;
2515 /* the background thread will free this */
2516 ancr = osi_Alloc(sizeof(struct nocache_read_request));
2517 ancr->auio = auio;
2518 ancr->offset = page_offset(pp);
2519 ancr->length = PAGE_SIZE;
2521 credp = crref();
2522 code = afs_ReadNoCache(VTOAFS(FILE_INODE(fp)), ancr, credp);
2523 crfree(credp);
2525 return afs_convert_code(code);
2528 static inline int
2529 afs_linux_can_bypass(struct inode *ip) {
2531 switch(cache_bypass_strategy) {
2532 case NEVER_BYPASS_CACHE:
2533 return 0;
2534 case ALWAYS_BYPASS_CACHE:
2535 return 1;
2536 case LARGE_FILES_BYPASS_CACHE:
2537 if (i_size_read(ip) > cache_bypass_threshold)
2538 return 1;
2539 default:
2540 return 0;
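/* For example, assuming cache_bypass_strategy is LARGE_FILES_BYPASS_CACHE and
 * cache_bypass_threshold corresponds to 1GiB: a 2GiB file returns 1 (bypass
 * the cache), while a 4KiB file falls through to the default case and
 * returns 0. */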
2544 /* Check if a file is permitted to bypass the cache by policy, and modify
2545 * the cache bypass state recorded for that file */
2547 static inline int
2548 afs_linux_bypass_check(struct inode *ip) {
2549 cred_t* credp;
2551 int bypass = afs_linux_can_bypass(ip);
2553 credp = crref();
2554 trydo_cache_transition(VTOAFS(ip), credp, bypass);
2555 crfree(credp);
2557 return bypass;
2561 static int
2562 afs_linux_readpage(struct file *fp, struct page *pp)
2564 int code;
2566 if (afs_linux_bypass_check(FILE_INODE(fp))) {
2567 code = afs_linux_bypass_readpage(fp, pp);
2568 } else {
2569 code = afs_linux_fillpage(fp, pp);
2570 if (!code)
2571 code = afs_linux_prefetch(fp, pp);
2572 unlock_page(pp);
2575 return code;
2578 /* Readpages reads a number of pages for a particular file. We use
2579 * this to optimise the reading, by limiting the number of times we have
2580 * to look up, lock, and open vcaches and dcaches
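/* In outline (a sketch of the loop below, with error handling omitted):
 *
 *     for each page in page_list:
 *         if (tdc && tdc->f.chunk != AFS_CHUNK(offset))
 *             release tdc and close cacheFp;       // crossed a chunk boundary
 *         if (!tdc)
 *             tdc = afs_FindDCache(avc, offset),
 *             cacheFp = afs_linux_raw_open(&tdc->f.inode);
 *         afs_linux_read_cache(cacheFp, page, tdc->f.chunk, &lrupv, task);
 */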
2583 static int
2584 afs_linux_readpages(struct file *fp, struct address_space *mapping,
2585 struct list_head *page_list, unsigned int num_pages)
2587 struct inode *inode = mapping->host;
2588 struct vcache *avc = VTOAFS(inode);
2589 struct dcache *tdc;
2590 struct file *cacheFp = NULL;
2591 int code;
2592 unsigned int page_idx;
2593 loff_t offset;
2594 struct pagevec lrupv;
2595 struct afs_pagecopy_task *task;
2597 if (afs_linux_bypass_check(inode))
2598 return afs_linux_bypass_readpages(fp, mapping, page_list, num_pages);
2600 if (cacheDiskType == AFS_FCACHE_TYPE_MEM)
2601 return 0;
2603 /* No readpage (e.g. tmpfs); skip */
2604 if (cachefs_noreadpage)
2605 return 0;
2607 AFS_GLOCK();
2608 if ((code = afs_linux_VerifyVCache(avc, NULL))) {
2609 AFS_GUNLOCK();
2610 return code;
2613 ObtainWriteLock(&avc->lock, 912);
2614 AFS_GUNLOCK();
2616 task = afs_pagecopy_init_task();
2618 tdc = NULL;
2619 pagevec_init(&lrupv, 0);
2620 for (page_idx = 0; page_idx < num_pages; page_idx++) {
2621 struct page *page = list_entry(page_list->prev, struct page, lru);
2622 list_del(&page->lru);
2623 offset = page_offset(page);
2625 if (tdc && tdc->f.chunk != AFS_CHUNK(offset)) {
2626 AFS_GLOCK();
2627 ReleaseReadLock(&tdc->lock);
2628 afs_PutDCache(tdc);
2629 AFS_GUNLOCK();
2630 tdc = NULL;
2631 if (cacheFp)
2632 filp_close(cacheFp, NULL);
2635 if (!tdc) {
2636 AFS_GLOCK();
2637 if ((tdc = afs_FindDCache(avc, offset))) {
2638 ObtainReadLock(&tdc->lock);
2639 if (!hsame(avc->f.m.DataVersion, tdc->f.versionNo) ||
2640 (tdc->dflags & DFFetching)) {
2641 ReleaseReadLock(&tdc->lock);
2642 afs_PutDCache(tdc);
2643 tdc = NULL;
2646 AFS_GUNLOCK();
2647 if (tdc) {
2648 cacheFp = afs_linux_raw_open(&tdc->f.inode);
2649 if (!cacheFp->f_dentry->d_inode->i_mapping->a_ops->readpage) {
2650 cachefs_noreadpage = 1;
2651 goto out;
2656 if (tdc && !add_to_page_cache(page, mapping, page->index,
2657 GFP_KERNEL)) {
2658 get_page(page);
2659 if (!pagevec_add(&lrupv, page))
2660 __pagevec_lru_add_file(&lrupv);
2662 afs_linux_read_cache(cacheFp, page, tdc->f.chunk, &lrupv, task);
2664 put_page(page);
2666 if (pagevec_count(&lrupv))
2667 __pagevec_lru_add_file(&lrupv);
2669 out:
2670 if (tdc)
2671 filp_close(cacheFp, NULL);
2673 afs_pagecopy_put_task(task);
2675 AFS_GLOCK();
2676 if (tdc) {
2677 ReleaseReadLock(&tdc->lock);
2678 afs_PutDCache(tdc);
2681 ReleaseWriteLock(&avc->lock);
2682 AFS_GUNLOCK();
2683 return 0;
2686 /* Prepare an AFS vcache for writeback. Should be called with the vcache
2687 * locked */
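/* A minimal sketch of the expected calling pattern (as used by
 * afs_linux_writepage_sync and afs_linux_writepage below):
 *
 *     ObtainWriteLock(&vcp->lock, ...);
 *     code = afs_linux_prepare_writeback(vcp);   // may return AOP_WRITEPAGE_ACTIVATE
 *     ReleaseWriteLock(&vcp->lock);
 *
 *     ... perform the actual page writeback with no AFS locks held ...
 *
 *     ObtainWriteLock(&vcp->lock, ...);
 *     afs_linux_complete_writeback(vcp);
 *     ReleaseWriteLock(&vcp->lock);
 */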
2688 static inline int
2689 afs_linux_prepare_writeback(struct vcache *avc) {
2690 pid_t pid;
2691 struct pagewriter *pw;
2693 pid = MyPidxx2Pid(MyPidxx);
2694 /* Prevent recursion into the writeback code */
2695 spin_lock(&avc->pagewriter_lock);
2696 list_for_each_entry(pw, &avc->pagewriters, link) {
2697 if (pw->writer == pid) {
2698 spin_unlock(&avc->pagewriter_lock);
2699 return AOP_WRITEPAGE_ACTIVATE;
2702 spin_unlock(&avc->pagewriter_lock);
2704 /* Add ourselves to writer list */
2705 pw = osi_Alloc(sizeof(struct pagewriter));
2706 pw->writer = pid;
2707 spin_lock(&avc->pagewriter_lock);
2708 list_add_tail(&pw->link, &avc->pagewriters);
2709 spin_unlock(&avc->pagewriter_lock);
2711 return 0;
2714 static inline int
2715 afs_linux_dopartialwrite(struct vcache *avc, cred_t *credp) {
2716 struct vrequest *treq = NULL;
2717 int code = 0;
2719 if (!afs_CreateReq(&treq, credp)) {
2720 code = afs_DoPartialWrite(avc, treq);
2721 afs_DestroyReq(treq);
2724 return afs_convert_code(code);
2727 static inline void
2728 afs_linux_complete_writeback(struct vcache *avc) {
2729 struct pagewriter *pw, *store;
2730 pid_t pid;
2731 struct list_head tofree;
2733 INIT_LIST_HEAD(&tofree);
2734 pid = MyPidxx2Pid(MyPidxx);
2735 /* Remove ourselves from writer list */
2736 spin_lock(&avc->pagewriter_lock);
2737 list_for_each_entry_safe(pw, store, &avc->pagewriters, link) {
2738 if (pw->writer == pid) {
2739 list_del(&pw->link);
2740 /* osi_Free may sleep so we need to defer it */
2741 list_add_tail(&pw->link, &tofree);
2744 spin_unlock(&avc->pagewriter_lock);
2745 list_for_each_entry_safe(pw, store, &tofree, link) {
2746 list_del(&pw->link);
2747 osi_Free(pw, sizeof(struct pagewriter));
2751 /* Write back a given page synchronously. Called with no AFS locks held */
2752 static int
2753 afs_linux_page_writeback(struct inode *ip, struct page *pp,
2754 unsigned long offset, unsigned int count,
2755 cred_t *credp)
2757 struct vcache *vcp = VTOAFS(ip);
2758 char *buffer;
2759 afs_offs_t base;
2760 int code = 0;
2761 struct uio tuio;
2762 struct iovec iovec;
2763 int f_flags = 0;
2765 memset(&tuio, 0, sizeof(tuio));
2766 memset(&iovec, 0, sizeof(iovec));
2768 buffer = kmap(pp) + offset;
2769 base = page_offset(pp) + offset;
2771 AFS_GLOCK();
2772 afs_Trace4(afs_iclSetp, CM_TRACE_UPDATEPAGE, ICL_TYPE_POINTER, vcp,
2773 ICL_TYPE_POINTER, pp, ICL_TYPE_INT32, page_count(pp),
2774 ICL_TYPE_INT32, 99999);
2776 setup_uio(&tuio, &iovec, buffer, base, count, UIO_WRITE, AFS_UIOSYS);
2778 code = afs_write(vcp, &tuio, f_flags, credp, 0);
2780 i_size_write(ip, vcp->f.m.Length);
2781 ip->i_blocks = ((vcp->f.m.Length + 1023) >> 10) << 1;
2783 code = code ? afs_convert_code(code) : count - tuio.uio_resid;
2785 afs_Trace4(afs_iclSetp, CM_TRACE_UPDATEPAGE, ICL_TYPE_POINTER, vcp,
2786 ICL_TYPE_POINTER, pp, ICL_TYPE_INT32, page_count(pp),
2787 ICL_TYPE_INT32, code);
2789 AFS_GUNLOCK();
2790 kunmap(pp);
2792 return code;
2795 static int
2796 afs_linux_writepage_sync(struct inode *ip, struct page *pp,
2797 unsigned long offset, unsigned int count)
2799 int code;
2800 int code1 = 0;
2801 struct vcache *vcp = VTOAFS(ip);
2802 cred_t *credp;
2804 /* Catch recursive writeback. This occurs if the kernel decides
2805 * writeback is required whilst we are writing to the cache, or
2806 * flushing to the server. When we're running synchronously (as
2807 * opposed to from writepage) we can't actually do anything about
2808 * this case - as we can't return AOP_WRITEPAGE_ACTIVATE to write()
2810 AFS_GLOCK();
2811 ObtainWriteLock(&vcp->lock, 532);
2812 afs_linux_prepare_writeback(vcp);
2813 ReleaseWriteLock(&vcp->lock);
2814 AFS_GUNLOCK();
2816 credp = crref();
2817 code = afs_linux_page_writeback(ip, pp, offset, count, credp);
2819 AFS_GLOCK();
2820 ObtainWriteLock(&vcp->lock, 533);
2821 if (code > 0)
2822 code1 = afs_linux_dopartialwrite(vcp, credp);
2823 afs_linux_complete_writeback(vcp);
2824 ReleaseWriteLock(&vcp->lock);
2825 AFS_GUNLOCK();
2826 crfree(credp);
2828 if (code1)
2829 return code1;
2831 return code;
2834 static int
2835 #ifdef AOP_WRITEPAGE_TAKES_WRITEBACK_CONTROL
2836 afs_linux_writepage(struct page *pp, struct writeback_control *wbc)
2837 #else
2838 afs_linux_writepage(struct page *pp)
2839 #endif
2841 struct address_space *mapping = pp->mapping;
2842 struct inode *inode;
2843 struct vcache *vcp;
2844 cred_t *credp;
2845 unsigned int to = PAGE_SIZE;
2846 loff_t isize;
2847 int code = 0;
2848 int code1 = 0;
2850 get_page(pp);
2852 inode = mapping->host;
2853 vcp = VTOAFS(inode);
2854 isize = i_size_read(inode);
2856 /* Don't defeat an earlier truncate */
2857 if (page_offset(pp) > isize) {
2858 set_page_writeback(pp);
2859 unlock_page(pp);
2860 goto done;
2863 AFS_GLOCK();
2864 ObtainWriteLock(&vcp->lock, 537);
2865 code = afs_linux_prepare_writeback(vcp);
2866 if (code == AOP_WRITEPAGE_ACTIVATE) {
2867 /* WRITEPAGE_ACTIVATE is the only return value that permits us
2868 * to return with the page still locked */
2869 ReleaseWriteLock(&vcp->lock);
2870 AFS_GUNLOCK();
2871 return code;
2874 /* Grab the creds structure currently held in the vnode, and
2875 * get a reference to it, in case it goes away ... */
2876 credp = vcp->cred;
2877 if (credp)
2878 crhold(credp);
2879 else
2880 credp = crref();
2881 ReleaseWriteLock(&vcp->lock);
2882 AFS_GUNLOCK();
2884 set_page_writeback(pp);
2886 SetPageUptodate(pp);
2888 /* We can unlock the page here, because it's protected by the
2889 * page_writeback flag. This should make us less vulnerable to
2890 * deadlocking in afs_write and afs_DoPartialWrite
2892 unlock_page(pp);
2894 /* If this is the final page, then just write the number of bytes that
2895 * are actually in it */
2896 if ((isize - page_offset(pp)) < to )
2897 to = isize - page_offset(pp);
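    /* e.g. with 4K pages and isize == 10000: the page at offset 8192 is the
     * final one, and to becomes 10000 - 8192 = 1808, so only the bytes that
     * are actually present get written back. */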
2899 code = afs_linux_page_writeback(inode, pp, 0, to, credp);
2901 AFS_GLOCK();
2902 ObtainWriteLock(&vcp->lock, 538);
2904 /* As much as we might like to ignore a file server error here,
2905 * and just try again when we close(), unfortunately StoreAllSegments
2906 * will invalidate our chunks if the server returns a permanent error,
2907 * so we need to at least try and get that error back to the user
2909 if (code == to)
2910 code1 = afs_linux_dopartialwrite(vcp, credp);
2912 afs_linux_complete_writeback(vcp);
2913 ReleaseWriteLock(&vcp->lock);
2914 crfree(credp);
2915 AFS_GUNLOCK();
2917 done:
2918 end_page_writeback(pp);
2919 put_page(pp);
2921 if (code1)
2922 return code1;
2924 if (code == to)
2925 return 0;
2927 return code;
2930 /* afs_linux_permission
2931 * Check access rights - returns error if can't check or permission denied.
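/* For instance (a sketch of the translation below): a MAY_READ|MAY_EXEC
 * check from the VFS is converted to VREAD|VEXEC before being handed to
 * afs_access(), whose result is then mapped back through afs_convert_code(). */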
2933 static int
2934 #if defined(IOP_PERMISSION_TAKES_FLAGS)
2935 afs_linux_permission(struct inode *ip, int mode, unsigned int flags)
2936 #elif defined(IOP_PERMISSION_TAKES_NAMEIDATA)
2937 afs_linux_permission(struct inode *ip, int mode, struct nameidata *nd)
2938 #else
2939 afs_linux_permission(struct inode *ip, int mode)
2940 #endif
2942 int code;
2943 cred_t *credp;
2944 int tmp = 0;
2946 /* Check for RCU path walking */
2947 #if defined(IOP_PERMISSION_TAKES_FLAGS)
2948 if (flags & IPERM_FLAG_RCU)
2949 return -ECHILD;
2950 #elif defined(MAY_NOT_BLOCK)
2951 if (mode & MAY_NOT_BLOCK)
2952 return -ECHILD;
2953 #endif
2955 credp = crref();
2956 AFS_GLOCK();
2957 if (mode & MAY_EXEC)
2958 tmp |= VEXEC;
2959 if (mode & MAY_READ)
2960 tmp |= VREAD;
2961 if (mode & MAY_WRITE)
2962 tmp |= VWRITE;
2963 code = afs_access(VTOAFS(ip), tmp, credp);
2965 AFS_GUNLOCK();
2966 crfree(credp);
2967 return afs_convert_code(code);
2970 static int
2971 afs_linux_commit_write(struct file *file, struct page *page, unsigned offset,
2972 unsigned to)
2974 int code;
2975 struct inode *inode = FILE_INODE(file);
2976 loff_t pagebase = page_offset(page);
2978 if (i_size_read(inode) < (pagebase + offset))
2979 i_size_write(inode, pagebase + offset);
2981 if (PageChecked(page)) {
2982 SetPageUptodate(page);
2983 ClearPageChecked(page);
2986 code = afs_linux_writepage_sync(inode, page, offset, to - offset);
2988 return code;
2991 static int
2992 afs_linux_prepare_write(struct file *file, struct page *page, unsigned from,
2993 unsigned to)
2996 /* http://kerneltrap.org/node/4941 details the expected behaviour of
2997 * prepare_write. Essentially, if the page exists within the file,
2998 * and is not being fully written, then we should populate it.
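    /* In outline (a summary of the checks below): if the page lies at or
     * beyond EOF, or the write runs from the start of the page out past EOF,
     * the untouched regions are simply zeroed; a full-page write needs no
     * read at all; any other partial write of a readable file is backed by
     * afs_linux_fillpage() so the untouched bytes remain valid. */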
3001 if (!PageUptodate(page)) {
3002 loff_t pagebase = page_offset(page);
3003 loff_t isize = i_size_read(page->mapping->host);
3005 /* Is the location we are writing to beyond the end of the file? */
3006 if (pagebase >= isize ||
3007 ((from == 0) && (pagebase + to) >= isize)) {
3008 zero_user_segments(page, 0, from, to, PAGE_SIZE);
3009 SetPageChecked(page);
3010 /* Are we writing a full page? */
3011 } else if (from == 0 && to == PAGE_SIZE) {
3012 SetPageChecked(page);
3013 /* Is the page readable? If it's write-only, we don't care, because we're
3014 * not actually going to read from it ... */
3015 } else if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
3016 /* We don't care if fillpage fails, because if it does the page
3017 * won't be marked as up to date
3019 afs_linux_fillpage(file, page);
3022 return 0;
3025 #if defined(STRUCT_ADDRESS_SPACE_OPERATIONS_HAS_WRITE_BEGIN)
3026 static int
3027 afs_linux_write_end(struct file *file, struct address_space *mapping,
3028 loff_t pos, unsigned len, unsigned copied,
3029 struct page *page, void *fsdata)
3031 int code;
3032 unsigned int from = pos & (PAGE_SIZE - 1);
3034 code = afs_linux_commit_write(file, page, from, from + copied);
3036 unlock_page(page);
3037 put_page(page);
3038 return code;
3041 static int
3042 afs_linux_write_begin(struct file *file, struct address_space *mapping,
3043 loff_t pos, unsigned len, unsigned flags,
3044 struct page **pagep, void **fsdata)
3046 struct page *page;
3047 pgoff_t index = pos >> PAGE_SHIFT;
3048 unsigned int from = pos & (PAGE_SIZE - 1);
3049 int code;
3051 page = grab_cache_page_write_begin(mapping, index, flags);
3052 *pagep = page;
3054 code = afs_linux_prepare_write(file, page, from, from + len);
3055 if (code) {
3056 unlock_page(page);
3057 put_page(page);
3060 return code;
3062 #endif
3064 #ifndef STRUCT_DENTRY_OPERATIONS_HAS_D_AUTOMOUNT
3065 static void *
3066 afs_linux_dir_follow_link(struct dentry *dentry, struct nameidata *nd)
3068 struct dentry **dpp;
3069 struct dentry *target;
3071 if (current->total_link_count > 0) {
3072 /* avoid symlink resolution limits when resolving; we cannot contribute to
3073 * an infinite symlink loop */
3074 /* only do this for follow_link when total_link_count is positive to be
3075 * on the safe side; there is at least one code path in the Linux
3076 * kernel where it seems like it may be possible to get here without
3077 * total_link_count getting incremented. It is not clear how that
3078 * path is actually reached, but guard against it just to be safe */
3079 current->total_link_count--;
3082 target = canonical_dentry(dentry->d_inode);
3084 # ifdef STRUCT_NAMEIDATA_HAS_PATH
3085 dpp = &nd->path.dentry;
3086 # else
3087 dpp = &nd->dentry;
3088 # endif
3090 dput(*dpp);
3092 if (target) {
3093 *dpp = target;
3094 } else {
3095 *dpp = dget(dentry);
3098 nd->last_type = LAST_BIND;
3100 return NULL;
3102 #endif /* !STRUCT_DENTRY_OPERATIONS_HAS_D_AUTOMOUNT */
3105 static struct inode_operations afs_file_iops = {
3106 .permission = afs_linux_permission,
3107 .getattr = afs_linux_getattr,
3108 .setattr = afs_notify_change,
3111 static struct address_space_operations afs_file_aops = {
3112 .readpage = afs_linux_readpage,
3113 .readpages = afs_linux_readpages,
3114 .writepage = afs_linux_writepage,
3115 #if defined (STRUCT_ADDRESS_SPACE_OPERATIONS_HAS_WRITE_BEGIN)
3116 .write_begin = afs_linux_write_begin,
3117 .write_end = afs_linux_write_end,
3118 #else
3119 .commit_write = afs_linux_commit_write,
3120 .prepare_write = afs_linux_prepare_write,
3121 #endif
3125 /* Separate ops vector for directories. Linux 2.2 tests the type of inode
3126 * by what sort of operation is allowed.
3129 static struct inode_operations afs_dir_iops = {
3130 .setattr = afs_notify_change,
3131 .create = afs_linux_create,
3132 .lookup = afs_linux_lookup,
3133 .link = afs_linux_link,
3134 .unlink = afs_linux_unlink,
3135 .symlink = afs_linux_symlink,
3136 .mkdir = afs_linux_mkdir,
3137 .rmdir = afs_linux_rmdir,
3138 .rename = afs_linux_rename,
3139 .getattr = afs_linux_getattr,
3140 .permission = afs_linux_permission,
3141 #ifndef STRUCT_DENTRY_OPERATIONS_HAS_D_AUTOMOUNT
3142 .follow_link = afs_linux_dir_follow_link,
3143 #endif
3146 /* We really need a separate symlink set of ops, since do_follow_link()
3147 * determines if it _is_ a link by checking if the follow_link op is set.
3149 #if defined(USABLE_KERNEL_PAGE_SYMLINK_CACHE)
3150 static int
3151 afs_symlink_filler(struct file *file, struct page *page)
3153 struct inode *ip = (struct inode *)page->mapping->host;
3154 char *p = (char *)kmap(page);
3155 int code;
3157 AFS_GLOCK();
3158 code = afs_linux_ireadlink(ip, p, PAGE_SIZE, AFS_UIOSYS);
3159 AFS_GUNLOCK();
3161 if (code < 0)
3162 goto fail;
3163 p[code] = '\0'; /* null terminate? */
3165 SetPageUptodate(page);
3166 kunmap(page);
3167 unlock_page(page);
3168 return 0;
3170 fail:
3171 SetPageError(page);
3172 kunmap(page);
3173 unlock_page(page);
3174 return code;
3177 static struct address_space_operations afs_symlink_aops = {
3178 .readpage = afs_symlink_filler
3180 #endif /* USABLE_KERNEL_PAGE_SYMLINK_CACHE */
3182 static struct inode_operations afs_symlink_iops = {
3183 #if defined(USABLE_KERNEL_PAGE_SYMLINK_CACHE)
3184 .readlink = page_readlink,
3185 # if defined(HAVE_LINUX_PAGE_GET_LINK)
3186 .get_link = page_get_link,
3187 # elif defined(HAVE_LINUX_PAGE_FOLLOW_LINK)
3188 .follow_link = page_follow_link,
3189 # else
3190 .follow_link = page_follow_link_light,
3191 .put_link = page_put_link,
3192 # endif
3193 #else /* !defined(USABLE_KERNEL_PAGE_SYMLINK_CACHE) */
3194 .readlink = afs_linux_readlink,
3195 .follow_link = afs_linux_follow_link,
3196 .put_link = afs_linux_put_link,
3197 #endif /* USABLE_KERNEL_PAGE_SYMLINK_CACHE */
3198 .setattr = afs_notify_change,
3201 void
3202 afs_fill_inode(struct inode *ip, struct vattr *vattr)
3204 if (vattr)
3205 vattr2inode(ip, vattr);
3207 #ifdef STRUCT_ADDRESS_SPACE_HAS_BACKING_DEV_INFO
3208 ip->i_mapping->backing_dev_info = afs_backing_dev_info;
3209 #endif
3210 /* Reset ops if symlink or directory. */
3211 if (S_ISREG(ip->i_mode)) {
3212 ip->i_op = &afs_file_iops;
3213 ip->i_fop = &afs_file_fops;
3214 ip->i_data.a_ops = &afs_file_aops;
3216 } else if (S_ISDIR(ip->i_mode)) {
3217 ip->i_op = &afs_dir_iops;
3218 ip->i_fop = &afs_dir_fops;
3220 } else if (S_ISLNK(ip->i_mode)) {
3221 ip->i_op = &afs_symlink_iops;
3222 #if defined(HAVE_LINUX_INODE_NOHIGHMEM)
3223 inode_nohighmem(ip);
3224 #endif
3225 #if defined(USABLE_KERNEL_PAGE_SYMLINK_CACHE)
3226 ip->i_data.a_ops = &afs_symlink_aops;
3227 ip->i_mapping = &ip->i_data;
3228 #endif