/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2011, Lawrence Livermore National Security, LLC.
 * Copyright (c) 2015 by Chunwei Chen. All rights reserved.
 */
#ifdef CONFIG_COMPAT
#include <linux/compat.h>
#endif
#include <linux/fs.h>
#include <sys/dmu_objset.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_vfsops.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_project.h>
#if defined(HAVE_VFS_SET_PAGE_DIRTY_NOBUFFERS) || \
    defined(HAVE_VFS_FILEMAP_DIRTY_FOLIO)
#include <linux/pagemap.h>
#endif
#ifdef HAVE_VFS_FILEMAP_DIRTY_FOLIO
#include <linux/writeback.h>
#endif
/*
 * When using fallocate(2) to preallocate space, inflate the requested
 * capacity check by 10% to account for the required metadata blocks.
 */
static unsigned int zfs_fallocate_reserve_percent = 110;
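/*
 * Illustrative arithmetic (not part of the upstream logic): with the
 * default value of 110, a mode=0 fallocate(2) request for 1 GiB passes
 * the capacity check in zpl_fallocate_common() only when roughly
 * 1 GiB * 110 / 100 ~= 1.1 GiB of free space is reported by
 * zfs_statvfs(), leaving ~10% of headroom for metadata blocks.
 * Setting the parameter to 0 disables mode=0 fallocate(2) support
 * entirely and EOPNOTSUPP is returned instead.
 */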
static int
zpl_open(struct inode *ip, struct file *filp)
{
	cred_t *cr = CRED();
	int error;
	fstrans_cookie_t cookie;

	error = generic_file_open(ip, filp);
	if (error)
		return (error);

	crhold(cr);
	cookie = spl_fstrans_mark();
	error = -zfs_open(ip, filp->f_mode, filp->f_flags, cr);
	spl_fstrans_unmark(cookie);
	crfree(cr);

	ASSERT3S(error, <=, 0);

	return (error);
}
static int
zpl_release(struct inode *ip, struct file *filp)
{
	cred_t *cr = CRED();
	int error;
	fstrans_cookie_t cookie;

	cookie = spl_fstrans_mark();
	if (ITOZ(ip)->z_atime_dirty)
		zfs_mark_inode_dirty(ip);

	crhold(cr);
	error = -zfs_close(ip, filp->f_flags, cr);
	spl_fstrans_unmark(cookie);
	crfree(cr);

	ASSERT3S(error, <=, 0);

	return (error);
}
static int
zpl_iterate(struct file *filp, zpl_dir_context_t *ctx)
{
	cred_t *cr = CRED();
	int error;
	fstrans_cookie_t cookie;

	crhold(cr);
	cookie = spl_fstrans_mark();
	error = -zfs_readdir(file_inode(filp), ctx, cr);
	spl_fstrans_unmark(cookie);
	crfree(cr);

	ASSERT3S(error, <=, 0);

	return (error);
}
#if !defined(HAVE_VFS_ITERATE) && !defined(HAVE_VFS_ITERATE_SHARED)
static int
zpl_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
	zpl_dir_context_t ctx =
	    ZPL_DIR_CONTEXT_INIT(dirent, filldir, filp->f_pos);
	int error;

	error = zpl_iterate(filp, &ctx);
	filp->f_pos = ctx.pos;

	return (error);
}
#endif /* !HAVE_VFS_ITERATE && !HAVE_VFS_ITERATE_SHARED */
#if defined(HAVE_FSYNC_WITHOUT_DENTRY)
/*
 * Linux 2.6.35 - 3.0 API,
 * As of 2.6.35 the dentry argument to the fops->fsync() hook was deemed
 * redundant.  The dentry is still accessible via filp->f_path.dentry,
 * and we are guaranteed that filp will never be NULL.
 */
static int
zpl_fsync(struct file *filp, int datasync)
{
	struct inode *inode = filp->f_mapping->host;
	cred_t *cr = CRED();
	int error;
	fstrans_cookie_t cookie;

	crhold(cr);
	cookie = spl_fstrans_mark();
	error = -zfs_fsync(ITOZ(inode), datasync, cr);
	spl_fstrans_unmark(cookie);
	crfree(cr);

	ASSERT3S(error, <=, 0);

	return (error);
}
#ifdef HAVE_FILE_AIO_FSYNC
static int
zpl_aio_fsync(struct kiocb *kiocb, int datasync)
{
	return (zpl_fsync(kiocb->ki_filp, datasync));
}
#endif
#elif defined(HAVE_FSYNC_RANGE)
/*
 * Linux 3.1 API,
 * As of 3.1 the responsibility to call filemap_write_and_wait_range() has
 * been pushed down in to the .fsync() vfs hook.  Additionally, the i_mutex
 * lock is no longer held by the caller, for zfs we don't require the lock
 * to be held so we don't acquire it.
 */
static int
zpl_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = filp->f_mapping->host;
	znode_t *zp = ITOZ(inode);
	zfsvfs_t *zfsvfs = ITOZSB(inode);
	cred_t *cr = CRED();
	int error;
	fstrans_cookie_t cookie;

	/*
	 * The variables z_sync_writes_cnt and z_async_writes_cnt work in
	 * tandem so that sync writes can detect if there are any non-sync
	 * writes going on and vice-versa.  The "vice-versa" part to this
	 * logic is located in zfs_putpage() where non-sync writes check if
	 * there are any ongoing sync writes.  If any sync and non-sync
	 * writes overlap, we do a commit to complete the non-sync writes
	 * since the latter can potentially take several seconds to complete
	 * and thus block sync writes in the upcoming call to
	 * filemap_write_and_wait_range().
	 */
	atomic_inc_32(&zp->z_sync_writes_cnt);
	/*
	 * If the following check does not detect an overlapping non-sync
	 * write (say because it's just about to start), then it is
	 * guaranteed that the non-sync write will detect this sync write.
	 * This is because we always increment z_sync_writes_cnt /
	 * z_async_writes_cnt before doing the check on z_async_writes_cnt /
	 * z_sync_writes_cnt here and in zfs_putpage() respectively.
	 */
	if (atomic_load_32(&zp->z_async_writes_cnt) > 0) {
		ZPL_ENTER(zfsvfs);
		zil_commit(zfsvfs->z_log, zp->z_id);
		ZPL_EXIT(zfsvfs);
	}

	error = filemap_write_and_wait_range(inode->i_mapping, start, end);

	/*
	 * The sync write is not complete yet but we decrement
	 * z_sync_writes_cnt since zfs_fsync() increments and decrements it
	 * internally.  If a non-sync write starts just after the decrement
	 * operation but before we call zfs_fsync(), it may not detect this
	 * overlapping sync write but it does not matter since we have
	 * already gone past filemap_write_and_wait_range() and we won't
	 * block due to the non-sync write.
	 */
	atomic_dec_32(&zp->z_sync_writes_cnt);

	if (error)
		return (error);

	crhold(cr);
	cookie = spl_fstrans_mark();
	error = -zfs_fsync(zp, datasync, cr);
	spl_fstrans_unmark(cookie);
	crfree(cr);

	ASSERT3S(error, <=, 0);

	return (error);
}
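/*
 * A minimal sketch of the counter handshake described above (reduced
 * for illustration, not upstream code).  Each side increments its own
 * counter before reading the other's, so whichever thread starts
 * second is guaranteed to observe the overlap:
 *
 *	sync writer (zpl_fsync)			non-sync writer (zfs_putpage)
 *	-----------------------			-----------------------------
 *	atomic_inc_32(				atomic_inc_32(
 *	    &zp->z_sync_writes_cnt);		    &zp->z_async_writes_cnt);
 *	if (atomic_load_32(			if (atomic_load_32(
 *	    &zp->z_async_writes_cnt) > 0)	    &zp->z_sync_writes_cnt) > 0)
 *		zil_commit(...);			commit = B_TRUE;
 */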
#ifdef HAVE_FILE_AIO_FSYNC
static int
zpl_aio_fsync(struct kiocb *kiocb, int datasync)
{
	return (zpl_fsync(kiocb->ki_filp, kiocb->ki_pos, -1, datasync));
}
#endif

#else
#error "Unsupported fops->fsync() implementation"
#endif
static inline int
zfs_io_flags(struct kiocb *kiocb)
{
	int flags = 0;

#if defined(IOCB_DSYNC)
	if (kiocb->ki_flags & IOCB_DSYNC)
		flags |= O_DSYNC;
#endif
#if defined(IOCB_SYNC)
	if (kiocb->ki_flags & IOCB_SYNC)
		flags |= O_SYNC;
#endif
#if defined(IOCB_APPEND)
	if (kiocb->ki_flags & IOCB_APPEND)
		flags |= O_APPEND;
#endif
#if defined(IOCB_DIRECT)
	if (kiocb->ki_flags & IOCB_DIRECT)
		flags |= O_DIRECT;
#endif
	return (flags);
}
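/*
 * For example (illustrative, the flag plumbing itself is the kernel's):
 * a pwritev2(2) call issued with RWF_DSYNC arrives here with IOCB_DSYNC
 * set in kiocb->ki_flags and is translated to O_DSYNC, so zfs_write()
 * can honor per-call sync semantics even when the file was not opened
 * with O_DSYNC.
 */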
/*
 * If relatime is enabled, call file_accessed() if zfs_relatime_need_update()
 * is true.  This is needed since datasets with inherited "relatime" property
 * aren't necessarily mounted with the MNT_RELATIME flag (e.g. after
 * `zfs set relatime=...`), which is what the relatime test in the VFS via
 * relatime_need_update() is based on.
 */
static inline void
zpl_file_accessed(struct file *filp)
{
	struct inode *ip = filp->f_mapping->host;

	if (!IS_NOATIME(ip) && ITOZSB(ip)->z_relatime) {
		if (zfs_relatime_need_update(ip))
			file_accessed(filp);
	} else {
		file_accessed(filp);
	}
}
#if defined(HAVE_VFS_RW_ITERATE)

/*
 * When HAVE_VFS_IOV_ITER is defined the iov_iter structure supports
 * iovecs, kvecs, bvecs and pipes, plus all the required interfaces to
 * manipulate the iov_iter are available.  In which case the full iov_iter
 * can be attached to the uio and correctly handled in the lower layers.
 * Otherwise, for older kernels extract the iovec and pass it instead.
 */
static void
zpl_uio_init(zfs_uio_t *uio, struct kiocb *kiocb, struct iov_iter *to,
    loff_t pos, ssize_t count, size_t skip)
{
#if defined(HAVE_VFS_IOV_ITER)
	zfs_uio_iov_iter_init(uio, to, pos, count, skip);
#else
#ifdef HAVE_IOV_ITER_TYPE
	zfs_uio_iovec_init(uio, to->iov, to->nr_segs, pos,
	    iov_iter_type(to) & ITER_KVEC ? UIO_SYSSPACE : UIO_USERSPACE,
	    count, skip);
#else
	zfs_uio_iovec_init(uio, to->iov, to->nr_segs, pos,
	    to->type & ITER_KVEC ? UIO_SYSSPACE : UIO_USERSPACE,
	    count, skip);
#endif
#endif
}
static ssize_t
zpl_iter_read(struct kiocb *kiocb, struct iov_iter *to)
{
	cred_t *cr = CRED();
	fstrans_cookie_t cookie;
	struct file *filp = kiocb->ki_filp;
	ssize_t count = iov_iter_count(to);
	zfs_uio_t uio;

	zpl_uio_init(&uio, kiocb, to, kiocb->ki_pos, count, 0);

	crhold(cr);
	cookie = spl_fstrans_mark();

	int error = -zfs_read(ITOZ(filp->f_mapping->host), &uio,
	    filp->f_flags | zfs_io_flags(kiocb), cr);

	spl_fstrans_unmark(cookie);
	crfree(cr);

	if (error < 0)
		return (error);

	ssize_t read = count - uio.uio_resid;
	kiocb->ki_pos += read;

	zpl_file_accessed(filp);

	return (read);
}
static inline ssize_t
zpl_generic_write_checks(struct kiocb *kiocb, struct iov_iter *from,
    size_t *countp)
{
#ifdef HAVE_GENERIC_WRITE_CHECKS_KIOCB
	ssize_t ret = generic_write_checks(kiocb, from);
	if (ret <= 0)
		return (ret);

	*countp = ret;
#else
	struct file *file = kiocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *ip = mapping->host;
	int isblk = S_ISBLK(ip->i_mode);

	*countp = iov_iter_count(from);
	ssize_t ret = generic_write_checks(file, &kiocb->ki_pos, countp,
	    isblk);
	if (ret)
		return (ret);
#endif

	return (0);
}
static ssize_t
zpl_iter_write(struct kiocb *kiocb, struct iov_iter *from)
{
	cred_t *cr = CRED();
	fstrans_cookie_t cookie;
	struct file *filp = kiocb->ki_filp;
	struct inode *ip = filp->f_mapping->host;
	zfs_uio_t uio;
	size_t count = 0;
	ssize_t ret;

	ret = zpl_generic_write_checks(kiocb, from, &count);
	if (ret)
		return (ret);

	zpl_uio_init(&uio, kiocb, from, kiocb->ki_pos, count,
	    from->iov_offset);

	crhold(cr);
	cookie = spl_fstrans_mark();

	int error = -zfs_write(ITOZ(ip), &uio,
	    filp->f_flags | zfs_io_flags(kiocb), cr);

	spl_fstrans_unmark(cookie);
	crfree(cr);

	if (error < 0)
		return (error);

	ssize_t wrote = count - uio.uio_resid;
	kiocb->ki_pos += wrote;

	return (wrote);
}

#else /* !HAVE_VFS_RW_ITERATE */
static ssize_t
zpl_aio_read(struct kiocb *kiocb, const struct iovec *iov,
    unsigned long nr_segs, loff_t pos)
{
	cred_t *cr = CRED();
	fstrans_cookie_t cookie;
	struct file *filp = kiocb->ki_filp;
	size_t count;
	ssize_t ret;

	ret = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
	if (ret)
		return (ret);

	zfs_uio_t uio;
	zfs_uio_iovec_init(&uio, iov, nr_segs, kiocb->ki_pos, UIO_USERSPACE,
	    count, 0);

	crhold(cr);
	cookie = spl_fstrans_mark();

	int error = -zfs_read(ITOZ(filp->f_mapping->host), &uio,
	    filp->f_flags | zfs_io_flags(kiocb), cr);

	spl_fstrans_unmark(cookie);
	crfree(cr);

	if (error < 0)
		return (error);

	ssize_t read = count - uio.uio_resid;
	kiocb->ki_pos += read;

	zpl_file_accessed(filp);

	return (read);
}
static ssize_t
zpl_aio_write(struct kiocb *kiocb, const struct iovec *iov,
    unsigned long nr_segs, loff_t pos)
{
	cred_t *cr = CRED();
	fstrans_cookie_t cookie;
	struct file *filp = kiocb->ki_filp;
	struct inode *ip = filp->f_mapping->host;
	size_t count;
	ssize_t ret;

	ret = generic_segment_checks(iov, &nr_segs, &count, VERIFY_READ);
	if (ret)
		return (ret);

	ret = generic_write_checks(filp, &pos, &count, S_ISBLK(ip->i_mode));
	if (ret)
		return (ret);

	kiocb->ki_pos = pos;

	zfs_uio_t uio;
	zfs_uio_iovec_init(&uio, iov, nr_segs, kiocb->ki_pos, UIO_USERSPACE,
	    count, 0);

	crhold(cr);
	cookie = spl_fstrans_mark();

	int error = -zfs_write(ITOZ(ip), &uio,
	    filp->f_flags | zfs_io_flags(kiocb), cr);

	spl_fstrans_unmark(cookie);
	crfree(cr);

	if (error < 0)
		return (error);

	ssize_t wrote = count - uio.uio_resid;
	kiocb->ki_pos += wrote;

	return (wrote);
}
#endif /* HAVE_VFS_RW_ITERATE */
#if defined(HAVE_VFS_RW_ITERATE)
static ssize_t
zpl_direct_IO_impl(int rw, struct kiocb *kiocb, struct iov_iter *iter)
{
	if (rw == WRITE)
		return (zpl_iter_write(kiocb, iter));
	else
		return (zpl_iter_read(kiocb, iter));
}
#if defined(HAVE_VFS_DIRECT_IO_ITER)
static ssize_t
zpl_direct_IO(struct kiocb *kiocb, struct iov_iter *iter)
{
	return (zpl_direct_IO_impl(iov_iter_rw(iter), kiocb, iter));
}
#elif defined(HAVE_VFS_DIRECT_IO_ITER_OFFSET)
static ssize_t
zpl_direct_IO(struct kiocb *kiocb, struct iov_iter *iter, loff_t pos)
{
	ASSERT3S(pos, ==, kiocb->ki_pos);
	return (zpl_direct_IO_impl(iov_iter_rw(iter), kiocb, iter));
}
#elif defined(HAVE_VFS_DIRECT_IO_ITER_RW_OFFSET)
static ssize_t
zpl_direct_IO(int rw, struct kiocb *kiocb, struct iov_iter *iter, loff_t pos)
{
	ASSERT3S(pos, ==, kiocb->ki_pos);
	return (zpl_direct_IO_impl(rw, kiocb, iter));
}
#else
#error "Unknown direct IO interface"
#endif

#else /* HAVE_VFS_RW_ITERATE */
#if defined(HAVE_VFS_DIRECT_IO_IOVEC)
static ssize_t
zpl_direct_IO(int rw, struct kiocb *kiocb, const struct iovec *iov,
    loff_t pos, unsigned long nr_segs)
{
	if (rw == WRITE)
		return (zpl_aio_write(kiocb, iov, nr_segs, pos));
	else
		return (zpl_aio_read(kiocb, iov, nr_segs, pos));
}
#elif defined(HAVE_VFS_DIRECT_IO_ITER_RW_OFFSET)
static ssize_t
zpl_direct_IO(int rw, struct kiocb *kiocb, struct iov_iter *iter, loff_t pos)
{
	const struct iovec *iovp = iov_iter_iovec(iter);
	unsigned long nr_segs = iter->nr_segs;

	ASSERT3S(pos, ==, kiocb->ki_pos);
	if (rw == WRITE)
		return (zpl_aio_write(kiocb, iovp, nr_segs, pos));
	else
		return (zpl_aio_read(kiocb, iovp, nr_segs, pos));
}
#else
#error "Unknown direct IO interface"
#endif

#endif /* HAVE_VFS_RW_ITERATE */
static loff_t
zpl_llseek(struct file *filp, loff_t offset, int whence)
{
#if defined(SEEK_HOLE) && defined(SEEK_DATA)
	fstrans_cookie_t cookie;

	if (whence == SEEK_DATA || whence == SEEK_HOLE) {
		struct inode *ip = filp->f_mapping->host;
		loff_t maxbytes = ip->i_sb->s_maxbytes;
		loff_t error;

		spl_inode_lock_shared(ip);
		cookie = spl_fstrans_mark();
		error = -zfs_holey(ITOZ(ip), whence, &offset);
		spl_fstrans_unmark(cookie);
		if (error == 0)
			error = lseek_execute(filp, ip, offset, maxbytes);
		spl_inode_unlock_shared(ip);

		return (error);
	}
#endif /* SEEK_HOLE && SEEK_DATA */

	return (generic_file_llseek(filp, offset, whence));
}
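/*
 * Illustrative userspace usage (not upstream code): sparse-file aware
 * tools walk a file using the two whence values handled above, e.g.
 *
 *	off_t data = lseek(fd, 0, SEEK_DATA);		// next data region
 *	off_t hole = lseek(fd, data, SEEK_HOLE);	// end of that region
 *
 * Both calls are resolved by zfs_holey(), while any other whence value
 * falls through to generic_file_llseek().
 */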
/*
 * It's worth taking a moment to describe how mmap is implemented
 * for zfs because it differs considerably from other Linux filesystems.
 * However, this issue is handled the same way under OpenSolaris.
 *
 * The issue is that by design zfs bypasses the Linux page cache and
 * leaves all caching up to the ARC.  This has been shown to work
 * well for the common read(2)/write(2) case.  However, mmap(2)
 * is a problem because it relies on being tightly integrated with the
 * page cache.  To handle this we cache mmap'ed files twice, once in
 * the ARC and a second time in the page cache.  The code is careful
 * to keep both copies synchronized.
 *
 * When a file with an mmap'ed region is written to using write(2)
 * both the data in the ARC and existing pages in the page cache
 * are updated.  For a read(2) data will be read first from the page
 * cache then the ARC if needed.  Neither a write(2) nor a read(2)
 * will ever result in new pages being added to the page cache.
 *
 * New pages are added to the page cache only via .readpage() which
 * is called when the vfs needs to read a page off disk to back the
 * virtual memory region.  These pages may be modified without
 * notifying the ARC and will be written out periodically via
 * .writepage().  This will occur due to either a sync or the usual
 * page aging behavior.  Note that because a read(2) of a mmap'ed file
 * will always check the page cache first, correct data will still be
 * returned even when the ARC is out of date.
 *
 * While this implementation ensures correct behavior it does have
 * some drawbacks.  The most obvious of which is that it increases
 * the required memory footprint when accessing mmap'ed files.  It
 * also adds additional complexity to the code, keeping both caches
 * synchronized.
 *
 * Longer term it may be possible to cleanly resolve this wart by
 * mapping page cache pages directly onto the ARC buffers.  The
 * Linux address space operations are flexible enough to allow
 * selection of which pages back a particular index.  The trick
 * would be working out the details of which subsystem is in
 * charge, the ARC, the page cache, or both.  It may also prove
 * helpful to move the ARC buffers to scatter-gather lists
 * rather than a vmalloc'ed region.
 */
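/*
 * Illustrative example of the double-caching behavior described above
 * (not upstream code).  Given an established mapping:
 *
 *	char *p = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
 *	pwrite(fd, buf, len, 0);
 *
 * the pwrite(2) updates both the ARC buffer and any existing page cache
 * pages, so a subsequent read through *p observes the new data without
 * any new pages being added to the page cache.
 */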
static int
zpl_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct inode *ip = filp->f_mapping->host;
	znode_t *zp = ITOZ(ip);
	int error;
	fstrans_cookie_t cookie;

	cookie = spl_fstrans_mark();
	error = -zfs_map(ip, vma->vm_pgoff, (caddr_t *)vma->vm_start,
	    (size_t)(vma->vm_end - vma->vm_start), vma->vm_flags);
	spl_fstrans_unmark(cookie);
	if (error)
		return (error);

	error = generic_file_mmap(filp, vma);
	if (error)
		return (error);

	mutex_enter(&zp->z_lock);
	zp->z_is_mapped = B_TRUE;
	mutex_exit(&zp->z_lock);

	return (error);
}
/*
 * Populate a page with data for the Linux page cache.  This function is
 * only used to support mmap(2).  There will be an identical copy of the
 * data in the ARC which is kept up to date via .write() and .writepage().
 */
static inline int
zpl_readpage_common(struct page *pp)
{
	struct inode *ip;
	struct page *pl[1];
	int error = 0;
	fstrans_cookie_t cookie;

	ASSERT(PageLocked(pp));
	ip = pp->mapping->host;
	pl[0] = pp;

	cookie = spl_fstrans_mark();
	error = -zfs_getpage(ip, pl, 1);
	spl_fstrans_unmark(cookie);

	if (error) {
		SetPageError(pp);
		ClearPageUptodate(pp);
	} else {
		ClearPageError(pp);
		SetPageUptodate(pp);
		flush_dcache_page(pp);
	}

	unlock_page(pp);
	return (error);
}
#ifdef HAVE_VFS_READ_FOLIO
static int
zpl_read_folio(struct file *filp, struct folio *folio)
{
	return (zpl_readpage_common(&folio->page));
}
#else
static int
zpl_readpage(struct file *filp, struct page *pp)
{
	return (zpl_readpage_common(pp));
}
#endif

static int
zpl_readpage_filler(void *data, struct page *pp)
{
	return (zpl_readpage_common(pp));
}
/*
 * Populate a set of pages with data for the Linux page cache.  This
 * function will only be called for read ahead and never for demand
 * paging.  For simplicity, the code relies on read_cache_pages() to
 * correctly lock each page for IO and call zpl_readpage().
 */
#ifdef HAVE_VFS_READPAGES
static int
zpl_readpages(struct file *filp, struct address_space *mapping,
    struct list_head *pages, unsigned nr_pages)
{
	return (read_cache_pages(mapping, pages, zpl_readpage_filler, NULL));
}
#else
static void
zpl_readahead(struct readahead_control *ractl)
{
	struct page *page;

	while ((page = readahead_page(ractl)) != NULL) {
		int ret;

		ret = zpl_readpage_filler(NULL, page);
		put_page(page);
		if (ret)
			break;
	}
}
#endif
static int
zpl_putpage(struct page *pp, struct writeback_control *wbc, void *data)
{
	boolean_t *for_sync = data;
	fstrans_cookie_t cookie;

	ASSERT(PageLocked(pp));
	ASSERT(!PageWriteback(pp));

	cookie = spl_fstrans_mark();
	(void) zfs_putpage(pp->mapping->host, pp, wbc, *for_sync);
	spl_fstrans_unmark(cookie);

	return (0);
}
static int
zpl_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	znode_t *zp = ITOZ(mapping->host);
	zfsvfs_t *zfsvfs = ITOZSB(mapping->host);
	enum writeback_sync_modes sync_mode;
	int result;

	ZPL_ENTER(zfsvfs);
	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		wbc->sync_mode = WB_SYNC_ALL;
	ZPL_EXIT(zfsvfs);
	sync_mode = wbc->sync_mode;

	/*
	 * We don't want to run write_cache_pages() in SYNC mode here, because
	 * that would make putpage() wait for a single page to be committed to
	 * disk every single time, resulting in atrocious performance.  Instead
	 * we run it once in non-SYNC mode so that the ZIL gets all the data,
	 * and then we commit it all in one go.
	 */
	boolean_t for_sync = (sync_mode == WB_SYNC_ALL);
	wbc->sync_mode = WB_SYNC_NONE;
	result = write_cache_pages(mapping, wbc, zpl_putpage, &for_sync);
	if (sync_mode != wbc->sync_mode) {
		ZPL_ENTER(zfsvfs);
		ZPL_VERIFY_ZP(zp);
		if (zfsvfs->z_log != NULL)
			zil_commit(zfsvfs->z_log, zp->z_id);
		ZPL_EXIT(zfsvfs);

		/*
		 * We need to call write_cache_pages() again (we can't just
		 * return after the commit) because the previous call in
		 * non-SYNC mode does not guarantee that we got all the dirty
		 * pages (see the implementation of write_cache_pages() for
		 * details).  That being said, this is a no-op in most cases.
		 */
		wbc->sync_mode = sync_mode;
		result = write_cache_pages(mapping, wbc, zpl_putpage,
		    &for_sync);
	}
	return (result);
}
/*
 * Write out dirty pages to the ARC, this function is only required to
 * support mmap(2).  Mapped pages may be dirtied by memory operations
 * which never call .write().  These dirty pages are kept in sync with
 * the ARC buffers via this hook.
 */
static int
zpl_writepage(struct page *pp, struct writeback_control *wbc)
{
	if (ITOZSB(pp->mapping->host)->z_os->os_sync == ZFS_SYNC_ALWAYS)
		wbc->sync_mode = WB_SYNC_ALL;

	boolean_t for_sync = (wbc->sync_mode == WB_SYNC_ALL);

	return (zpl_putpage(pp, wbc, &for_sync));
}
/*
 * The flag combination which matches the behavior of zfs_space() is
 * FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE.  The FALLOC_FL_PUNCH_HOLE
 * flag was introduced in the 2.6.38 kernel.
 *
 * The original mode=0 (allocate space) behavior can be reasonably emulated
 * by checking if enough space exists and creating a sparse file, as real
 * persistent space reservation is not possible due to COW, snapshots, etc.
 */
static long
zpl_fallocate_common(struct inode *ip, int mode, loff_t offset, loff_t len)
{
	cred_t *cr = CRED();
	loff_t olen;
	fstrans_cookie_t cookie;
	int error = 0;

	int test_mode = FALLOC_FL_PUNCH_HOLE;
#ifdef HAVE_FALLOC_FL_ZERO_RANGE
	test_mode |= FALLOC_FL_ZERO_RANGE;
#endif

	if ((mode & ~(FALLOC_FL_KEEP_SIZE | test_mode)) != 0)
		return (-EOPNOTSUPP);

	if (offset < 0 || len <= 0)
		return (-EINVAL);

	spl_inode_lock(ip);
	olen = i_size_read(ip);

	crhold(cr);
	cookie = spl_fstrans_mark();
	if (mode & (test_mode)) {
		flock64_t bf;

		if (mode & FALLOC_FL_KEEP_SIZE) {
			if (offset > olen)
				goto out_unmark;

			if (offset + len > olen)
				len = olen - offset;
		}
		bf.l_type = F_WRLCK;
		bf.l_whence = SEEK_SET;
		bf.l_start = offset;
		bf.l_len = len;
		bf.l_pid = 0;

		error = -zfs_space(ITOZ(ip), F_FREESP, &bf, O_RDWR, offset, cr);
	} else if ((mode & ~FALLOC_FL_KEEP_SIZE) == 0) {
		unsigned int percent = zfs_fallocate_reserve_percent;
		struct kstatfs statfs;

		/* Legacy mode, disable fallocate compatibility. */
		if (percent == 0) {
			error = -EOPNOTSUPP;
			goto out_unmark;
		}

		/*
		 * Use zfs_statvfs() instead of dmu_objset_space() since it
		 * also checks project quota limits, which are relevant here.
		 */
		error = zfs_statvfs(ip, &statfs);
		if (error)
			goto out_unmark;

		/*
		 * Shrink available space a bit to account for overhead/races.
		 * We know the product previously fit into availbytes from
		 * dmu_objset_space(), so the smaller product will also fit.
		 */
		if (len > statfs.f_bavail * (statfs.f_bsize * 100 / percent)) {
			error = -ENOSPC;
			goto out_unmark;
		}
		if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > olen)
			error = zfs_freesp(ITOZ(ip), offset + len, 0, 0, FALSE);
	}
out_unmark:
	spl_fstrans_unmark(cookie);
	spl_inode_unlock(ip);

	crfree(cr);

	return (error);
}
static long
zpl_fallocate(struct file *filp, int mode, loff_t offset, loff_t len)
{
	return zpl_fallocate_common(file_inode(filp),
	    mode, offset, len);
}
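/*
 * Illustrative mapping of fallocate(2) requests to the cases above
 * (not upstream code):
 *
 *	// Punch a hole; handled via zfs_space(F_FREESP).
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, off, len);
 *
 *	// Preallocate; emulated by the capacity check plus zfs_freesp()
 *	// when the range extends past the current end of file.
 *	fallocate(fd, 0, off, len);
 */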
static int
zpl_ioctl_getversion(struct file *filp, void __user *arg)
{
	uint32_t generation = file_inode(filp)->i_generation;

	return (copy_to_user(arg, &generation, sizeof (generation)));
}
#define	ZFS_FL_USER_VISIBLE	(FS_FL_USER_VISIBLE | ZFS_PROJINHERIT_FL)
#define	ZFS_FL_USER_MODIFIABLE	(FS_FL_USER_MODIFIABLE | ZFS_PROJINHERIT_FL)
static uint32_t
__zpl_ioctl_getflags(struct inode *ip)
{
	uint64_t zfs_flags = ITOZ(ip)->z_pflags;
	uint32_t ioctl_flags = 0;

	if (zfs_flags & ZFS_IMMUTABLE)
		ioctl_flags |= FS_IMMUTABLE_FL;

	if (zfs_flags & ZFS_APPENDONLY)
		ioctl_flags |= FS_APPEND_FL;

	if (zfs_flags & ZFS_NODUMP)
		ioctl_flags |= FS_NODUMP_FL;

	if (zfs_flags & ZFS_PROJINHERIT)
		ioctl_flags |= ZFS_PROJINHERIT_FL;

	return (ioctl_flags & ZFS_FL_USER_VISIBLE);
}
/*
 * Map zfs file z_pflags (xvattr_t) to linux file attributes.  Only file
 * attributes common to both Linux and Solaris are mapped.
 */
static int
zpl_ioctl_getflags(struct file *filp, void __user *arg)
{
	uint32_t flags;
	int err;

	flags = __zpl_ioctl_getflags(file_inode(filp));
	err = copy_to_user(arg, &flags, sizeof (flags));

	return (err);
}
/*
 * fchange() is a helper macro to detect if we have been asked to change a
 * flag.  This is ugly, but the requirement that we do this is a consequence
 * of how the Linux file attribute interface was designed.  Another
 * consequence is that concurrent modification of files suffers from a TOCTOU
 * race.  Neither are things we can fix without modifying the kernel-userland
 * interface, which is outside of our jurisdiction.
 */
#define	fchange(f0, f1, b0, b1) (!((f0) & (b0)) != !((f1) & (b1)))
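/*
 * For example, fchange(ioctl_flags, zfs_flags, FS_APPEND_FL,
 * ZFS_APPENDONLY) is nonzero exactly when the append-only bit in the
 * requested Linux flags disagrees with the current ZFS z_pflags, i.e.
 * when the caller is asking us to toggle it.
 */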
static int
__zpl_ioctl_setflags(struct inode *ip, uint32_t ioctl_flags, xvattr_t *xva)
{
	uint64_t zfs_flags = ITOZ(ip)->z_pflags;
	xoptattr_t *xoap;

	if (ioctl_flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | FS_NODUMP_FL |
	    ZFS_PROJINHERIT_FL))
		return (-EOPNOTSUPP);

	if (ioctl_flags & ~ZFS_FL_USER_MODIFIABLE)
		return (-EACCES);

	if ((fchange(ioctl_flags, zfs_flags, FS_IMMUTABLE_FL, ZFS_IMMUTABLE) ||
	    fchange(ioctl_flags, zfs_flags, FS_APPEND_FL, ZFS_APPENDONLY)) &&
	    !capable(CAP_LINUX_IMMUTABLE))
		return (-EPERM);

	if (!zpl_inode_owner_or_capable(kcred->user_ns, ip))
		return (-EACCES);

	xva_init(xva);
	xoap = xva_getxoptattr(xva);

#define	FLAG_CHANGE(iflag, zflag, xflag, xfield)	do {	\
	if (((ioctl_flags & (iflag)) && !(zfs_flags & (zflag))) ||	\
	    ((zfs_flags & (zflag)) && !(ioctl_flags & (iflag)))) {	\
		XVA_SET_REQ(xva, (xflag));	\
		(xfield) = ((ioctl_flags & (iflag)) != 0);	\
	}	\
} while (0)

	FLAG_CHANGE(FS_IMMUTABLE_FL, ZFS_IMMUTABLE, XAT_IMMUTABLE,
	    xoap->xoa_immutable);
	FLAG_CHANGE(FS_APPEND_FL, ZFS_APPENDONLY, XAT_APPENDONLY,
	    xoap->xoa_appendonly);
	FLAG_CHANGE(FS_NODUMP_FL, ZFS_NODUMP, XAT_NODUMP,
	    xoap->xoa_nodump);
	FLAG_CHANGE(ZFS_PROJINHERIT_FL, ZFS_PROJINHERIT, XAT_PROJINHERIT,
	    xoap->xoa_projinherit);

#undef	FLAG_CHANGE

	return (0);
}
static int
zpl_ioctl_setflags(struct file *filp, void __user *arg)
{
	struct inode *ip = file_inode(filp);
	uint32_t flags;
	cred_t *cr = CRED();
	xvattr_t xva;
	int err;
	fstrans_cookie_t cookie;

	if (copy_from_user(&flags, arg, sizeof (flags)))
		return (-EFAULT);

	err = __zpl_ioctl_setflags(ip, flags, &xva);
	if (err)
		return (err);

	crhold(cr);
	cookie = spl_fstrans_mark();
	err = -zfs_setattr(ITOZ(ip), (vattr_t *)&xva, 0, cr);
	spl_fstrans_unmark(cookie);
	crfree(cr);

	return (err);
}
static int
zpl_ioctl_getxattr(struct file *filp, void __user *arg)
{
	zfsxattr_t fsx = { 0 };
	struct inode *ip = file_inode(filp);
	int err;

	fsx.fsx_xflags = __zpl_ioctl_getflags(ip);
	fsx.fsx_projid = ITOZ(ip)->z_projid;
	err = copy_to_user(arg, &fsx, sizeof (fsx));

	return (err);
}
static int
zpl_ioctl_setxattr(struct file *filp, void __user *arg)
{
	struct inode *ip = file_inode(filp);
	zfsxattr_t fsx;
	cred_t *cr = CRED();
	xvattr_t xva;
	xoptattr_t *xoap;
	int err;
	fstrans_cookie_t cookie;

	if (copy_from_user(&fsx, arg, sizeof (fsx)))
		return (-EFAULT);

	if (!zpl_is_valid_projid(fsx.fsx_projid))
		return (-EINVAL);

	err = __zpl_ioctl_setflags(ip, fsx.fsx_xflags, &xva);
	if (err)
		return (err);

	xoap = xva_getxoptattr(&xva);
	XVA_SET_REQ(&xva, XAT_PROJID);
	xoap->xoa_projid = fsx.fsx_projid;

	crhold(cr);
	cookie = spl_fstrans_mark();
	err = -zfs_setattr(ITOZ(ip), (vattr_t *)&xva, 0, cr);
	spl_fstrans_unmark(cookie);
	crfree(cr);

	return (err);
}
/*
 * Expose Additional File Level Attributes of ZFS.
 */
static int
zpl_ioctl_getdosflags(struct file *filp, void __user *arg)
{
	struct inode *ip = file_inode(filp);
	uint64_t dosflags = ITOZ(ip)->z_pflags;
	dosflags &= ZFS_DOS_FL_USER_VISIBLE;
	int err = copy_to_user(arg, &dosflags, sizeof (dosflags));

	return (err);
}
static int
__zpl_ioctl_setdosflags(struct inode *ip, uint64_t ioctl_flags, xvattr_t *xva)
{
	uint64_t zfs_flags = ITOZ(ip)->z_pflags;
	xoptattr_t *xoap;

	if (ioctl_flags & (~ZFS_DOS_FL_USER_VISIBLE))
		return (-EOPNOTSUPP);

	if ((fchange(ioctl_flags, zfs_flags, ZFS_IMMUTABLE, ZFS_IMMUTABLE) ||
	    fchange(ioctl_flags, zfs_flags, ZFS_APPENDONLY, ZFS_APPENDONLY)) &&
	    !capable(CAP_LINUX_IMMUTABLE))
		return (-EPERM);

	if (!zpl_inode_owner_or_capable(kcred->user_ns, ip))
		return (-EACCES);

	xva_init(xva);
	xoap = xva_getxoptattr(xva);

#define	FLAG_CHANGE(iflag, xflag, xfield)	do {	\
	if (((ioctl_flags & (iflag)) && !(zfs_flags & (iflag))) ||	\
	    ((zfs_flags & (iflag)) && !(ioctl_flags & (iflag)))) {	\
		XVA_SET_REQ(xva, (xflag));	\
		(xfield) = ((ioctl_flags & (iflag)) != 0);	\
	}	\
} while (0)

	FLAG_CHANGE(ZFS_IMMUTABLE, XAT_IMMUTABLE, xoap->xoa_immutable);
	FLAG_CHANGE(ZFS_APPENDONLY, XAT_APPENDONLY, xoap->xoa_appendonly);
	FLAG_CHANGE(ZFS_NODUMP, XAT_NODUMP, xoap->xoa_nodump);
	FLAG_CHANGE(ZFS_READONLY, XAT_READONLY, xoap->xoa_readonly);
	FLAG_CHANGE(ZFS_HIDDEN, XAT_HIDDEN, xoap->xoa_hidden);
	FLAG_CHANGE(ZFS_SYSTEM, XAT_SYSTEM, xoap->xoa_system);
	FLAG_CHANGE(ZFS_ARCHIVE, XAT_ARCHIVE, xoap->xoa_archive);
	FLAG_CHANGE(ZFS_NOUNLINK, XAT_NOUNLINK, xoap->xoa_nounlink);
	FLAG_CHANGE(ZFS_REPARSE, XAT_REPARSE, xoap->xoa_reparse);
	FLAG_CHANGE(ZFS_OFFLINE, XAT_OFFLINE, xoap->xoa_offline);
	FLAG_CHANGE(ZFS_SPARSE, XAT_SPARSE, xoap->xoa_sparse);

#undef	FLAG_CHANGE

	return (0);
}
/*
 * Set Additional File Level Attributes of ZFS.
 */
static int
zpl_ioctl_setdosflags(struct file *filp, void __user *arg)
{
	struct inode *ip = file_inode(filp);
	uint64_t dosflags;
	cred_t *cr = CRED();
	xvattr_t xva;
	int err;
	fstrans_cookie_t cookie;

	if (copy_from_user(&dosflags, arg, sizeof (dosflags)))
		return (-EFAULT);

	err = __zpl_ioctl_setdosflags(ip, dosflags, &xva);
	if (err)
		return (err);

	crhold(cr);
	cookie = spl_fstrans_mark();
	err = -zfs_setattr(ITOZ(ip), (vattr_t *)&xva, 0, cr);
	spl_fstrans_unmark(cookie);
	crfree(cr);

	return (err);
}
static long
zpl_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case FS_IOC_GETVERSION:
		return (zpl_ioctl_getversion(filp, (void *)arg));
	case FS_IOC_GETFLAGS:
		return (zpl_ioctl_getflags(filp, (void *)arg));
	case FS_IOC_SETFLAGS:
		return (zpl_ioctl_setflags(filp, (void *)arg));
	case ZFS_IOC_FSGETXATTR:
		return (zpl_ioctl_getxattr(filp, (void *)arg));
	case ZFS_IOC_FSSETXATTR:
		return (zpl_ioctl_setxattr(filp, (void *)arg));
	case ZFS_IOC_GETDOSFLAGS:
		return (zpl_ioctl_getdosflags(filp, (void *)arg));
	case ZFS_IOC_SETDOSFLAGS:
		return (zpl_ioctl_setdosflags(filp, (void *)arg));
	default:
		return (-ENOTTY);
	}
}
#ifdef CONFIG_COMPAT
static long
zpl_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case FS_IOC32_GETVERSION:
		cmd = FS_IOC_GETVERSION;
		break;
	case FS_IOC32_GETFLAGS:
		cmd = FS_IOC_GETFLAGS;
		break;
	case FS_IOC32_SETFLAGS:
		cmd = FS_IOC_SETFLAGS;
		break;
	default:
		return (-ENOTTY);
	}
	return (zpl_ioctl(filp, cmd, (unsigned long)compat_ptr(arg)));
}
#endif /* CONFIG_COMPAT */
const struct address_space_operations zpl_address_space_operations = {
#ifdef HAVE_VFS_READPAGES
	.readpages	= zpl_readpages,
#else
	.readahead	= zpl_readahead,
#endif
#ifdef HAVE_VFS_READ_FOLIO
	.read_folio	= zpl_read_folio,
#else
	.readpage	= zpl_readpage,
#endif
	.writepage	= zpl_writepage,
	.writepages	= zpl_writepages,
	.direct_IO	= zpl_direct_IO,
#ifdef HAVE_VFS_SET_PAGE_DIRTY_NOBUFFERS
	.set_page_dirty	= __set_page_dirty_nobuffers,
#endif
#ifdef HAVE_VFS_FILEMAP_DIRTY_FOLIO
	.dirty_folio	= filemap_dirty_folio,
#endif
};
const struct file_operations zpl_file_operations = {
	.open		= zpl_open,
	.release	= zpl_release,
	.llseek		= zpl_llseek,
#ifdef HAVE_VFS_RW_ITERATE
#ifdef HAVE_NEW_SYNC_READ
	.read		= new_sync_read,
	.write		= new_sync_write,
#endif
	.read_iter	= zpl_iter_read,
	.write_iter	= zpl_iter_write,
#ifdef HAVE_VFS_IOV_ITER
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
#endif
#else
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= zpl_aio_read,
	.aio_write	= zpl_aio_write,
#endif
	.mmap		= zpl_mmap,
	.fsync		= zpl_fsync,
#ifdef HAVE_FILE_AIO_FSYNC
	.aio_fsync	= zpl_aio_fsync,
#endif
	.fallocate	= zpl_fallocate,
	.unlocked_ioctl	= zpl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= zpl_compat_ioctl,
#endif
};
const struct file_operations zpl_dir_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
#if defined(HAVE_VFS_ITERATE_SHARED)
	.iterate_shared	= zpl_iterate,
#elif defined(HAVE_VFS_ITERATE)
	.iterate	= zpl_iterate,
#else
	.readdir	= zpl_readdir,
#endif
	.fsync		= zpl_fsync,
	.unlocked_ioctl	= zpl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= zpl_compat_ioctl,
#endif
};
module_param(zfs_fallocate_reserve_percent, uint, 0644);
MODULE_PARM_DESC(zfs_fallocate_reserve_percent,
	"Percentage of length to use for the available capacity check");