/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs_trans.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_attr_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_error.h"
#include "xfs_ioctl32.h"

#include <linux/dcache.h>
#include <linux/smp_lock.h>
static struct vm_operations_struct xfs_file_vm_ops;
#ifdef CONFIG_XFS_DMAPI
static struct vm_operations_struct xfs_dmapi_file_vm_ops;
#endif
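/*
 * Both vm_operations tables are defined at the bottom of this file.  The
 * DMAPI variant differs only in routing page faults through xfs_vm_nopage()
 * so an mmap event can be sent before falling back to filemap_nopage().
 */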
STATIC inline ssize_t
__xfs_file_read(
        struct kiocb            *iocb,
        char                    __user *buf,
        int                     ioflags,
        size_t                  count,
        loff_t                  pos)
{
        struct iovec            iov = {buf, count};
        struct file             *file = iocb->ki_filp;
        bhv_vnode_t             *vp = vn_from_inode(file->f_dentry->d_inode);

        BUG_ON(iocb->ki_pos != pos);
        if (unlikely(file->f_flags & O_DIRECT))
                ioflags |= IO_ISDIRECT;
        return bhv_vop_read(vp, iocb, &iov, 1, &iocb->ki_pos, ioflags, NULL);
}
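/*
 * The aio entry points below are thin wrappers: they tag the request with
 * IO_ISAIO, and the *_invis variants additionally set IO_INVIS, the flag
 * this file uses for DMAPI "invisible" I/O.
 */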
STATIC ssize_t
xfs_file_aio_read(
        struct kiocb            *iocb,
        char                    __user *buf,
        size_t                  count,
        loff_t                  pos)
{
        return __xfs_file_read(iocb, buf, IO_ISAIO, count, pos);
}
STATIC ssize_t
xfs_file_aio_read_invis(
        struct kiocb            *iocb,
        char                    __user *buf,
        size_t                  count,
        loff_t                  pos)
{
        return __xfs_file_read(iocb, buf, IO_ISAIO|IO_INVIS, count, pos);
}
STATIC inline ssize_t
__xfs_file_write(
        struct kiocb            *iocb,
        const char              __user *buf,
        int                     ioflags,
        size_t                  count,
        loff_t                  pos)
{
        struct iovec            iov = {(void __user *)buf, count};
        struct file             *file = iocb->ki_filp;
        struct inode            *inode = file->f_mapping->host;
        bhv_vnode_t             *vp = vn_from_inode(inode);

        BUG_ON(iocb->ki_pos != pos);
        if (unlikely(file->f_flags & O_DIRECT))
                ioflags |= IO_ISDIRECT;
        return bhv_vop_write(vp, iocb, &iov, 1, &iocb->ki_pos, ioflags, NULL);
}
STATIC ssize_t
xfs_file_aio_write(
        struct kiocb            *iocb,
        const char              __user *buf,
        size_t                  count,
        loff_t                  pos)
{
        return __xfs_file_write(iocb, buf, IO_ISAIO, count, pos);
}
STATIC ssize_t
xfs_file_aio_write_invis(
        struct kiocb            *iocb,
        const char              __user *buf,
        size_t                  count,
        loff_t                  pos)
{
        return __xfs_file_write(iocb, buf, IO_ISAIO|IO_INVIS, count, pos);
}
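/*
 * The readv/writev paths have no kiocb handed down from the VFS, so they
 * build a synchronous one with init_sync_kiocb() and copy the final ki_pos
 * back into *ppos when the behavior-layer call returns.
 */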
STATIC inline ssize_t
__xfs_file_readv(
        struct file             *file,
        const struct iovec      *iov,
        int                     ioflags,
        unsigned long           nr_segs,
        loff_t                  *ppos)
{
        struct inode            *inode = file->f_mapping->host;
        bhv_vnode_t             *vp = vn_from_inode(inode);
        struct kiocb            kiocb;
        ssize_t                 rval;

        init_sync_kiocb(&kiocb, file);
        kiocb.ki_pos = *ppos;

        if (unlikely(file->f_flags & O_DIRECT))
                ioflags |= IO_ISDIRECT;
        rval = bhv_vop_read(vp, &kiocb, iov, nr_segs,
                        &kiocb.ki_pos, ioflags, NULL);

        *ppos = kiocb.ki_pos;
        return rval;
}
STATIC ssize_t
xfs_file_readv(
        struct file             *file,
        const struct iovec      *iov,
        unsigned long           nr_segs,
        loff_t                  *ppos)
{
        return __xfs_file_readv(file, iov, 0, nr_segs, ppos);
}
STATIC ssize_t
xfs_file_readv_invis(
        struct file             *file,
        const struct iovec      *iov,
        unsigned long           nr_segs,
        loff_t                  *ppos)
{
        return __xfs_file_readv(file, iov, IO_INVIS, nr_segs, ppos);
}
STATIC inline ssize_t
__xfs_file_writev(
        struct file             *file,
        const struct iovec      *iov,
        int                     ioflags,
        unsigned long           nr_segs,
        loff_t                  *ppos)
{
        struct inode            *inode = file->f_mapping->host;
        bhv_vnode_t             *vp = vn_from_inode(inode);
        struct kiocb            kiocb;
        ssize_t                 rval;

        init_sync_kiocb(&kiocb, file);
        kiocb.ki_pos = *ppos;
        if (unlikely(file->f_flags & O_DIRECT))
                ioflags |= IO_ISDIRECT;

        rval = bhv_vop_write(vp, &kiocb, iov, nr_segs,
                        &kiocb.ki_pos, ioflags, NULL);

        *ppos = kiocb.ki_pos;
        return rval;
}
STATIC ssize_t
xfs_file_writev(
        struct file             *file,
        const struct iovec      *iov,
        unsigned long           nr_segs,
        loff_t                  *ppos)
{
        return __xfs_file_writev(file, iov, 0, nr_segs, ppos);
}
STATIC ssize_t
xfs_file_writev_invis(
        struct file             *file,
        const struct iovec      *iov,
        unsigned long           nr_segs,
        loff_t                  *ppos)
{
        return __xfs_file_writev(file, iov, IO_INVIS, nr_segs, ppos);
}
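/*
 * sendfile and splice go straight to the behavior layer; the only
 * difference between each pair of entry points is whether 0 or IO_INVIS
 * is passed for the ioflags argument.
 */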
STATIC ssize_t
xfs_file_sendfile(
        struct file             *filp,
        loff_t                  *pos,
        size_t                  count,
        read_actor_t            actor,
        void                    *target)
{
        return bhv_vop_sendfile(vn_from_inode(filp->f_dentry->d_inode),
                                filp, pos, 0, count, actor, target, NULL);
}
STATIC ssize_t
xfs_file_sendfile_invis(
        struct file             *filp,
        loff_t                  *pos,
        size_t                  count,
        read_actor_t            actor,
        void                    *target)
{
        return bhv_vop_sendfile(vn_from_inode(filp->f_dentry->d_inode),
                                filp, pos, IO_INVIS, count, actor, target, NULL);
}
STATIC ssize_t
xfs_file_splice_read(
        struct file             *infilp,
        loff_t                  *ppos,
        struct pipe_inode_info  *pipe,
        size_t                  len,
        unsigned int            flags)
{
        return bhv_vop_splice_read(vn_from_inode(infilp->f_dentry->d_inode),
                                   infilp, ppos, pipe, len, flags, 0, NULL);
}
STATIC ssize_t
xfs_file_splice_read_invis(
        struct file             *infilp,
        loff_t                  *ppos,
        struct pipe_inode_info  *pipe,
        size_t                  len,
        unsigned int            flags)
{
        return bhv_vop_splice_read(vn_from_inode(infilp->f_dentry->d_inode),
                                   infilp, ppos, pipe, len, flags, IO_INVIS,
                                   NULL);
}
STATIC ssize_t
xfs_file_splice_write(
        struct pipe_inode_info  *pipe,
        struct file             *outfilp,
        loff_t                  *ppos,
        size_t                  len,
        unsigned int            flags)
{
        return bhv_vop_splice_write(vn_from_inode(outfilp->f_dentry->d_inode),
                                    pipe, outfilp, ppos, len, flags, 0, NULL);
}
STATIC ssize_t
xfs_file_splice_write_invis(
        struct pipe_inode_info  *pipe,
        struct file             *outfilp,
        loff_t                  *ppos,
        size_t                  len,
        unsigned int            flags)
{
        return bhv_vop_splice_write(vn_from_inode(outfilp->f_dentry->d_inode),
                                    pipe, outfilp, ppos, len, flags, IO_INVIS,
                                    NULL);
}
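/*
 * xfs_file_open() refuses to open a file larger than MAX_NON_LFS unless
 * the caller set O_LARGEFILE, then hands the open off to the behavior layer.
 */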
STATIC int
xfs_file_open(
        struct inode    *inode,
        struct file     *filp)
{
        if (!(filp->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
                return -EFBIG;
        return -bhv_vop_open(vn_from_inode(inode), NULL);
}
STATIC int
xfs_file_close(
        struct file     *filp,
        fl_owner_t      id)
{
        return -bhv_vop_close(vn_from_inode(filp->f_dentry->d_inode), 0,
                                file_count(filp) > 1 ? L_FALSE : L_TRUE, NULL);
}
STATIC int
xfs_file_release(
        struct inode    *inode,
        struct file     *filp)
{
        bhv_vnode_t     *vp = vn_from_inode(inode);

        if (vp)
                return -bhv_vop_release(vp);
        return 0;
}
STATIC int
xfs_file_fsync(
        struct file     *filp,
        struct dentry   *dentry,
        int             datasync)
{
        bhv_vnode_t     *vp = vn_from_inode(dentry->d_inode);
        int             flags = FSYNC_WAIT;

        if (datasync)
                flags |= FSYNC_DATA;
        return -bhv_vop_fsync(vp, flags, NULL, (xfs_off_t)0, (xfs_off_t)-1);
}
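/*
 * When DMAPI is configured, page faults on mapped files are intercepted so
 * that an mmap event can be sent to the DMAPI application before falling
 * back to the generic filemap_nopage() handler.
 */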
#ifdef CONFIG_XFS_DMAPI
STATIC struct page *
xfs_vm_nopage(
        struct vm_area_struct   *area,
        unsigned long           address,
        int                     *type)
{
        struct inode    *inode = area->vm_file->f_dentry->d_inode;
        bhv_vnode_t     *vp = vn_from_inode(inode);

        ASSERT_ALWAYS(vp->v_vfsp->vfs_flag & VFS_DMI);
        if (XFS_SEND_MMAP(XFS_VFSTOM(vp->v_vfsp), area, 0))
                return NULL;
        return filemap_nopage(area, address, type);
}
#endif /* CONFIG_XFS_DMAPI */
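/*
 * readdir: pull batches of xfs_dirent_t records from the behavior layer
 * into a temporary kernel buffer and feed them to the VFS filldir callback,
 * tracking the directory offset in filp->f_pos.
 */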
STATIC int
xfs_file_readdir(
        struct file     *filp,
        void            *dirent,
        filldir_t       filldir)
{
        int             error = 0;
        bhv_vnode_t     *vp = vn_from_inode(filp->f_dentry->d_inode);
        uio_t           uio;
        iovec_t         iov;
        int             eof = 0;
        caddr_t         read_buf;
        int             namelen, size = 0;
        size_t          rlen = PAGE_CACHE_SIZE;
        xfs_off_t       start_offset, curr_offset;
        xfs_dirent_t    *dbp = NULL;

        /* Try fairly hard to get memory */
        do {
                if ((read_buf = (caddr_t)kmalloc(rlen, GFP_KERNEL)))
                        break;
                rlen >>= 1;
        } while (rlen >= 1024);

        if (read_buf == NULL)
                return -ENOMEM;

        uio.uio_iov = &iov;
        uio.uio_segflg = UIO_SYSSPACE;
        curr_offset = filp->f_pos;
        if (filp->f_pos != 0x7fffffff)
                uio.uio_offset = filp->f_pos;
        else
                uio.uio_offset = 0xffffffff;

        while (!eof) {
                uio.uio_resid = iov.iov_len = rlen;
                iov.iov_base = read_buf;
                uio.uio_iovcnt = 1;

                start_offset = uio.uio_offset;

                error = bhv_vop_readdir(vp, &uio, NULL, &eof);
                if ((uio.uio_offset == start_offset) || error) {
                        size = 0;
                        break;
                }

                size = rlen - uio.uio_resid;
                dbp = (xfs_dirent_t *)read_buf;
                while (size > 0) {
                        namelen = strlen(dbp->d_name);

                        if (filldir(dirent, dbp->d_name, namelen,
                                        (loff_t) curr_offset & 0x7fffffff,
                                        (ino_t) dbp->d_ino,
                                        DT_UNKNOWN)) {
                                goto done;
                        }
                        size -= dbp->d_reclen;
                        curr_offset = (loff_t)dbp->d_off /* & 0x7fffffff */;
                        dbp = (xfs_dirent_t *)((char *)dbp + dbp->d_reclen);
                }
        }
done:
        if (!error) {
                if (size == 0)
                        filp->f_pos = uio.uio_offset & 0x7fffffff;
                else if (dbp)
                        filp->f_pos = curr_offset;
        }

        kfree(read_buf);
        return -error;
}
STATIC int
xfs_file_mmap(
        struct file             *filp,
        struct vm_area_struct   *vma)
{
        vma->vm_ops = &xfs_file_vm_ops;

#ifdef CONFIG_XFS_DMAPI
        if (vn_from_inode(filp->f_dentry->d_inode)->v_vfsp->vfs_flag & VFS_DMI)
                vma->vm_ops = &xfs_dmapi_file_vm_ops;
#endif /* CONFIG_XFS_DMAPI */

        file_accessed(filp);
        return 0;
}
STATIC long
xfs_file_ioctl(
        struct file     *filp,
        unsigned int    cmd,
        unsigned long   p)
{
        int             error;
        struct inode    *inode = filp->f_dentry->d_inode;
        bhv_vnode_t     *vp = vn_from_inode(inode);

        error = bhv_vop_ioctl(vp, inode, filp, 0, cmd, (void __user *)p);
        VMODIFY(vp);

        /* NOTE:  some of the ioctl's return positive #'s as a
         *        byte count indicating success, such as
         *        readlink_by_handle.  So we don't "sign flip"
         *        like most other routines.  This means true
         *        errors need to be returned as a negative value.
         */
        return error;
}
STATIC long
xfs_file_ioctl_invis(
        struct file     *filp,
        unsigned int    cmd,
        unsigned long   p)
{
        int             error;
        struct inode    *inode = filp->f_dentry->d_inode;
        bhv_vnode_t     *vp = vn_from_inode(inode);

        error = bhv_vop_ioctl(vp, inode, filp, IO_INVIS, cmd, (void __user *)p);
        VMODIFY(vp);

        /* NOTE:  some of the ioctl's return positive #'s as a
         *        byte count indicating success, such as
         *        readlink_by_handle.  So we don't "sign flip"
         *        like most other routines.  This means true
         *        errors need to be returned as a negative value.
         */
        return error;
}
#ifdef CONFIG_XFS_DMAPI
#ifdef HAVE_VMOP_MPROTECT
STATIC int
xfs_vm_mprotect(
        struct vm_area_struct   *vma,
        unsigned int            newflags)
{
        bhv_vnode_t     *vp = vn_from_inode(vma->vm_file->f_dentry->d_inode);
        int             error = 0;

        if (vp->v_vfsp->vfs_flag & VFS_DMI) {
                if ((vma->vm_flags & VM_MAYSHARE) &&
                    (newflags & VM_WRITE) && !(vma->vm_flags & VM_WRITE)) {
                        xfs_mount_t     *mp = XFS_VFSTOM(vp->v_vfsp);

                        error = XFS_SEND_MMAP(mp, vma, VM_WRITE);
                }
        }
        return error;
}
#endif /* HAVE_VMOP_MPROTECT */
#endif /* CONFIG_XFS_DMAPI */
#ifdef HAVE_FOP_OPEN_EXEC
/* If the user is attempting to execute a file that is offline then
 * we have to trigger a DMAPI READ event before the file is marked as busy
 * otherwise the invisible I/O will not be able to write to the file to bring
 * it back online.
 */
STATIC int
xfs_file_open_exec(
        struct inode    *inode)
{
        bhv_vnode_t     *vp = vn_from_inode(inode);

        if (unlikely(vp->v_vfsp->vfs_flag & VFS_DMI)) {
                xfs_mount_t     *mp = XFS_VFSTOM(vp->v_vfsp);
                xfs_inode_t     *ip = xfs_vtoi(vp);

                if (!ip)
                        return -EINVAL;
                if (DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ))
                        return -XFS_SEND_DATA(mp, DM_EVENT_READ, vp,
                                               0, 0, 0, NULL);
        }
        return 0;
}
#endif /* HAVE_FOP_OPEN_EXEC */
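/*
 * Three operation tables: the normal path for regular files, a variant
 * whose read/write/ioctl entry points pass IO_INVIS for DMAPI invisible
 * I/O, and the directory operations.
 */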
const struct file_operations xfs_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
        .readv          = xfs_file_readv,
        .writev         = xfs_file_writev,
        .aio_read       = xfs_file_aio_read,
        .aio_write      = xfs_file_aio_write,
        .sendfile       = xfs_file_sendfile,
        .splice_read    = xfs_file_splice_read,
        .splice_write   = xfs_file_splice_write,
        .unlocked_ioctl = xfs_file_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = xfs_file_compat_ioctl,
#endif
        .mmap           = xfs_file_mmap,
        .open           = xfs_file_open,
        .flush          = xfs_file_close,
        .release        = xfs_file_release,
        .fsync          = xfs_file_fsync,
#ifdef HAVE_FOP_OPEN_EXEC
        .open_exec      = xfs_file_open_exec,
#endif
};
const struct file_operations xfs_invis_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
        .readv          = xfs_file_readv_invis,
        .writev         = xfs_file_writev_invis,
        .aio_read       = xfs_file_aio_read_invis,
        .aio_write      = xfs_file_aio_write_invis,
        .sendfile       = xfs_file_sendfile_invis,
        .splice_read    = xfs_file_splice_read_invis,
        .splice_write   = xfs_file_splice_write_invis,
        .unlocked_ioctl = xfs_file_ioctl_invis,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = xfs_file_compat_invis_ioctl,
#endif
        .mmap           = xfs_file_mmap,
        .open           = xfs_file_open,
        .flush          = xfs_file_close,
        .release        = xfs_file_release,
        .fsync          = xfs_file_fsync,
};
const struct file_operations xfs_dir_file_operations = {
        .read           = generic_read_dir,
        .readdir        = xfs_file_readdir,
        .unlocked_ioctl = xfs_file_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = xfs_file_compat_ioctl,
#endif
        .fsync          = xfs_file_fsync,
};
static struct vm_operations_struct xfs_file_vm_ops = {
        .nopage         = filemap_nopage,
        .populate       = filemap_populate,
};
#ifdef CONFIG_XFS_DMAPI
static struct vm_operations_struct xfs_dmapi_file_vm_ops = {
        .nopage         = xfs_vm_nopage,
        .populate       = filemap_populate,
#ifdef HAVE_VMOP_MPROTECT
        .mprotect       = xfs_vm_mprotect,
#endif
};
#endif /* CONFIG_XFS_DMAPI */