/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2011, Lawrence Livermore National Security, LLC.
 * Copyright (c) 2015 by Chunwei Chen. All rights reserved.
 */
#ifdef CONFIG_COMPAT
#include <linux/compat.h>
#endif
#include <sys/dmu_objset.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_vfsops.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_project.h>
#if defined(HAVE_VFS_SET_PAGE_DIRTY_NOBUFFERS) || \
    defined(HAVE_VFS_FILEMAP_DIRTY_FOLIO)
#include <linux/pagemap.h>
#endif
#ifdef HAVE_FILE_FADVISE
#include <linux/fadvise.h>
#endif
#ifdef HAVE_VFS_FILEMAP_DIRTY_FOLIO
#include <linux/writeback.h>
#endif
/*
 * When using fallocate(2) to preallocate space, inflate the requested
 * capacity check by 10% to account for the required metadata blocks.
 */
static unsigned int zfs_fallocate_reserve_percent = 110;
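
/*
 * For illustration only: with the default value of 110, the capacity check
 * in zpl_fallocate_common() below charges each block at
 * f_bsize * 100 / 110 bytes.  With a 4 KiB f_bsize that is ~3723 bytes per
 * block, so preallocating a 1 GiB file only succeeds when roughly 1.1 GiB
 * is reported as available by zfs_statvfs().
 */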
static int
zpl_open(struct inode *ip, struct file *filp)
{
	cred_t *cr = CRED();
	int error;
	fstrans_cookie_t cookie;

	error = generic_file_open(ip, filp);
	if (error)
		return (error);

	crhold(cr);
	cookie = spl_fstrans_mark();
	error = -zfs_open(ip, filp->f_mode, filp->f_flags, cr);
	spl_fstrans_unmark(cookie);
	crfree(cr);
	ASSERT3S(error, <=, 0);

	return (error);
}
static int
zpl_release(struct inode *ip, struct file *filp)
{
	cred_t *cr = CRED();
	int error;
	fstrans_cookie_t cookie;

	cookie = spl_fstrans_mark();
	if (ITOZ(ip)->z_atime_dirty)
		zfs_mark_inode_dirty(ip);

	crhold(cr);
	error = -zfs_close(ip, filp->f_flags, cr);
	spl_fstrans_unmark(cookie);
	crfree(cr);
	ASSERT3S(error, <=, 0);

	return (error);
}
static int
zpl_iterate(struct file *filp, zpl_dir_context_t *ctx)
{
	cred_t *cr = CRED();
	int error;
	fstrans_cookie_t cookie;

	crhold(cr);
	cookie = spl_fstrans_mark();
	error = -zfs_readdir(file_inode(filp), ctx, cr);
	spl_fstrans_unmark(cookie);
	crfree(cr);
	ASSERT3S(error, <=, 0);

	return (error);
}
#if !defined(HAVE_VFS_ITERATE) && !defined(HAVE_VFS_ITERATE_SHARED)
static int
zpl_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
	zpl_dir_context_t ctx =
	    ZPL_DIR_CONTEXT_INIT(dirent, filldir, filp->f_pos);
	int error;

	error = zpl_iterate(filp, &ctx);
	filp->f_pos = ctx.pos;

	return (error);
}
#endif /* !HAVE_VFS_ITERATE && !HAVE_VFS_ITERATE_SHARED */
#if defined(HAVE_FSYNC_WITHOUT_DENTRY)
/*
 * Linux 2.6.35 - 3.0 API,
 * As of 2.6.35 the dentry argument to the fops->fsync() hook was deemed
 * redundant.  The dentry is still accessible via filp->f_path.dentry,
 * and we are guaranteed that filp will never be NULL.
 */
static int
zpl_fsync(struct file *filp, int datasync)
{
	struct inode *inode = filp->f_mapping->host;
	cred_t *cr = CRED();
	int error;
	fstrans_cookie_t cookie;

	crhold(cr);
	cookie = spl_fstrans_mark();
	error = -zfs_fsync(ITOZ(inode), datasync, cr);
	spl_fstrans_unmark(cookie);
	crfree(cr);
	ASSERT3S(error, <=, 0);

	return (error);
}
#ifdef HAVE_FILE_AIO_FSYNC
static int
zpl_aio_fsync(struct kiocb *kiocb, int datasync)
{
	return (zpl_fsync(kiocb->ki_filp, datasync));
}
#endif
#elif defined(HAVE_FSYNC_RANGE)
/*
 * As of 3.1 the responsibility to call filemap_write_and_wait_range() has
 * been pushed down into the .fsync() vfs hook.  Additionally, the i_mutex
 * lock is no longer held by the caller, for zfs we don't require the lock
 * to be held so we don't acquire it.
 */
static int
zpl_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = filp->f_mapping->host;
	znode_t *zp = ITOZ(inode);
	zfsvfs_t *zfsvfs = ITOZSB(inode);
	cred_t *cr = CRED();
	int error;
	fstrans_cookie_t cookie;

	/*
	 * The variables z_sync_writes_cnt and z_async_writes_cnt work in
	 * tandem so that sync writes can detect if there are any non-sync
	 * writes going on and vice-versa.  The "vice-versa" part to this logic
	 * is located in zfs_putpage() where non-sync writes check if there are
	 * any ongoing sync writes.  If any sync and non-sync writes overlap,
	 * we do a commit to complete the non-sync writes since the latter can
	 * potentially take several seconds to complete and thus block sync
	 * writes in the upcoming call to filemap_write_and_wait_range().
	 */
	atomic_inc_32(&zp->z_sync_writes_cnt);
	/*
	 * If the following check does not detect an overlapping non-sync write
	 * (say because it's just about to start), then it is guaranteed that
	 * the non-sync write will detect this sync write.  This is because we
	 * always increment z_sync_writes_cnt / z_async_writes_cnt before doing
	 * the check on z_async_writes_cnt / z_sync_writes_cnt here and in
	 * zfs_putpage() respectively.
	 */
	if (atomic_load_32(&zp->z_async_writes_cnt) > 0) {
		if ((error = zpl_enter(zfsvfs, FTAG)) != 0) {
			atomic_dec_32(&zp->z_sync_writes_cnt);
			return (error);
		}
		zil_commit(zfsvfs->z_log, zp->z_id);
		zpl_exit(zfsvfs, FTAG);
	}

	error = filemap_write_and_wait_range(inode->i_mapping, start, end);

	/*
	 * The sync write is not complete yet but we decrement
	 * z_sync_writes_cnt since zfs_fsync() increments and decrements
	 * it internally.  If a non-sync write starts just after the decrement
	 * operation but before we call zfs_fsync(), it may not detect this
	 * overlapping sync write but it does not matter since we have already
	 * gone past filemap_write_and_wait_range() and we won't block due to
	 * the non-sync write.
	 */
	atomic_dec_32(&zp->z_sync_writes_cnt);

	if (error)
		return (error);

	crhold(cr);
	cookie = spl_fstrans_mark();
	error = -zfs_fsync(zp, datasync, cr);
	spl_fstrans_unmark(cookie);
	crfree(cr);
	ASSERT3S(error, <=, 0);

	return (error);
}
#ifdef HAVE_FILE_AIO_FSYNC
static int
zpl_aio_fsync(struct kiocb *kiocb, int datasync)
{
	return (zpl_fsync(kiocb->ki_filp, kiocb->ki_pos, -1, datasync));
}
#endif

#else
#error "Unsupported fops->fsync() implementation"
#endif
static inline int
zfs_io_flags(struct kiocb *kiocb)
{
	int flags = 0;

#if defined(IOCB_DSYNC)
	if (kiocb->ki_flags & IOCB_DSYNC)
		flags |= O_DSYNC;
#endif
#if defined(IOCB_SYNC)
	if (kiocb->ki_flags & IOCB_SYNC)
		flags |= O_SYNC;
#endif
#if defined(IOCB_APPEND)
	if (kiocb->ki_flags & IOCB_APPEND)
		flags |= O_APPEND;
#endif
#if defined(IOCB_DIRECT)
	if (kiocb->ki_flags & IOCB_DIRECT)
		flags |= O_DIRECT;
#endif
	return (flags);
}
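
/*
 * For example, a write submitted with RWF_DSYNC reaches us with IOCB_DSYNC
 * set in kiocb->ki_flags; the mapping above turns that into O_DSYNC, which
 * the callers below OR into filp->f_flags before passing the combined flags
 * to zfs_read()/zfs_write().
 */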
/*
 * If relatime is enabled, call file_accessed() if zfs_relatime_need_update()
 * is true.  This is needed since datasets with inherited "relatime" property
 * aren't necessarily mounted with the MNT_RELATIME flag (e.g. after
 * `zfs set relatime=...`), which is what the relatime test in the VFS,
 * relatime_need_update(), is based on.
 */
static inline void
zpl_file_accessed(struct file *filp)
{
	struct inode *ip = filp->f_mapping->host;

	if (!IS_NOATIME(ip) && ITOZSB(ip)->z_relatime) {
		if (zfs_relatime_need_update(ip))
			file_accessed(filp);
	} else {
		file_accessed(filp);
	}
}
#if defined(HAVE_VFS_RW_ITERATE)

/*
 * When HAVE_VFS_IOV_ITER is defined the iov_iter structure supports
 * iovecs, kvecs, bvecs and pipes, plus all the required interfaces to
 * manipulate the iov_iter are available.  In which case the full iov_iter
 * can be attached to the uio and correctly handled in the lower layers.
 * Otherwise, for older kernels extract the iovec and pass it instead.
 */
static void
zpl_uio_init(zfs_uio_t *uio, struct kiocb *kiocb, struct iov_iter *to,
    loff_t pos, ssize_t count, size_t skip)
{
#if defined(HAVE_VFS_IOV_ITER)
	zfs_uio_iov_iter_init(uio, to, pos, count, skip);
#else
#ifdef HAVE_IOV_ITER_TYPE
	zfs_uio_iovec_init(uio, to->iov, to->nr_segs, pos,
	    iov_iter_type(to) & ITER_KVEC ? UIO_SYSSPACE : UIO_USERSPACE,
	    count, skip);
#else
	zfs_uio_iovec_init(uio, to->iov, to->nr_segs, pos,
	    to->type & ITER_KVEC ? UIO_SYSSPACE : UIO_USERSPACE,
	    count, skip);
#endif
#endif
}
static ssize_t
zpl_iter_read(struct kiocb *kiocb, struct iov_iter *to)
{
	cred_t *cr = CRED();
	fstrans_cookie_t cookie;
	struct file *filp = kiocb->ki_filp;
	ssize_t count = iov_iter_count(to);
	zfs_uio_t uio;

	zpl_uio_init(&uio, kiocb, to, kiocb->ki_pos, count, 0);

	crhold(cr);
	cookie = spl_fstrans_mark();

	int error = -zfs_read(ITOZ(filp->f_mapping->host), &uio,
	    filp->f_flags | zfs_io_flags(kiocb), cr);

	spl_fstrans_unmark(cookie);
	crfree(cr);

	if (error < 0)
		return (error);

	ssize_t read = count - uio.uio_resid;
	kiocb->ki_pos += read;

	zpl_file_accessed(filp);

	return (read);
}
static inline ssize_t
zpl_generic_write_checks(struct kiocb *kiocb, struct iov_iter *from,
    size_t *countp)
{
#ifdef HAVE_GENERIC_WRITE_CHECKS_KIOCB
	ssize_t ret = generic_write_checks(kiocb, from);
	if (ret <= 0)
		return (ret);

	*countp = ret;
#else
	struct file *file = kiocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *ip = mapping->host;
	int isblk = S_ISBLK(ip->i_mode);

	*countp = iov_iter_count(from);
	ssize_t ret = generic_write_checks(file, &kiocb->ki_pos, countp, isblk);
	if (ret)
		return (ret);
#endif

	return (0);
}
static ssize_t
zpl_iter_write(struct kiocb *kiocb, struct iov_iter *from)
{
	cred_t *cr = CRED();
	fstrans_cookie_t cookie;
	struct file *filp = kiocb->ki_filp;
	struct inode *ip = filp->f_mapping->host;
	zfs_uio_t uio;
	size_t count = 0;
	ssize_t ret;

	ret = zpl_generic_write_checks(kiocb, from, &count);
	if (ret)
		return (ret);

	zpl_uio_init(&uio, kiocb, from, kiocb->ki_pos, count, from->iov_offset);

	crhold(cr);
	cookie = spl_fstrans_mark();

	int error = -zfs_write(ITOZ(ip), &uio,
	    filp->f_flags | zfs_io_flags(kiocb), cr);

	spl_fstrans_unmark(cookie);
	crfree(cr);

	if (error < 0)
		return (error);

	ssize_t wrote = count - uio.uio_resid;
	kiocb->ki_pos += wrote;

	return (wrote);
}
#else /* !HAVE_VFS_RW_ITERATE */

static ssize_t
zpl_aio_read(struct kiocb *kiocb, const struct iovec *iov,
    unsigned long nr_segs, loff_t pos)
{
	cred_t *cr = CRED();
	fstrans_cookie_t cookie;
	struct file *filp = kiocb->ki_filp;
	size_t count;
	ssize_t ret;

	ret = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
	if (ret)
		return (ret);

	zfs_uio_t uio;
	zfs_uio_iovec_init(&uio, iov, nr_segs, kiocb->ki_pos, UIO_USERSPACE,
	    count, 0);

	crhold(cr);
	cookie = spl_fstrans_mark();

	int error = -zfs_read(ITOZ(filp->f_mapping->host), &uio,
	    filp->f_flags | zfs_io_flags(kiocb), cr);

	spl_fstrans_unmark(cookie);
	crfree(cr);

	if (error < 0)
		return (error);

	ssize_t read = count - uio.uio_resid;
	kiocb->ki_pos += read;

	zpl_file_accessed(filp);

	return (read);
}
static ssize_t
zpl_aio_write(struct kiocb *kiocb, const struct iovec *iov,
    unsigned long nr_segs, loff_t pos)
{
	cred_t *cr = CRED();
	fstrans_cookie_t cookie;
	struct file *filp = kiocb->ki_filp;
	struct inode *ip = filp->f_mapping->host;
	size_t count;
	ssize_t ret;

	ret = generic_segment_checks(iov, &nr_segs, &count, VERIFY_READ);
	if (ret)
		return (ret);

	ret = generic_write_checks(filp, &pos, &count, S_ISBLK(ip->i_mode));
	if (ret)
		return (ret);

	kiocb->ki_pos = pos;

	zfs_uio_t uio;
	zfs_uio_iovec_init(&uio, iov, nr_segs, kiocb->ki_pos, UIO_USERSPACE,
	    count, 0);

	crhold(cr);
	cookie = spl_fstrans_mark();

	int error = -zfs_write(ITOZ(ip), &uio,
	    filp->f_flags | zfs_io_flags(kiocb), cr);

	spl_fstrans_unmark(cookie);
	crfree(cr);

	if (error < 0)
		return (error);

	ssize_t wrote = count - uio.uio_resid;
	kiocb->ki_pos += wrote;

	return (wrote);
}
#endif /* HAVE_VFS_RW_ITERATE */
#if defined(HAVE_VFS_RW_ITERATE)
static ssize_t
zpl_direct_IO_impl(int rw, struct kiocb *kiocb, struct iov_iter *iter)
{
	if (rw == WRITE)
		return (zpl_iter_write(kiocb, iter));
	else
		return (zpl_iter_read(kiocb, iter));
}
#if defined(HAVE_VFS_DIRECT_IO_ITER)
static ssize_t
zpl_direct_IO(struct kiocb *kiocb, struct iov_iter *iter)
{
	return (zpl_direct_IO_impl(iov_iter_rw(iter), kiocb, iter));
}
#elif defined(HAVE_VFS_DIRECT_IO_ITER_OFFSET)
static ssize_t
zpl_direct_IO(struct kiocb *kiocb, struct iov_iter *iter, loff_t pos)
{
	ASSERT3S(pos, ==, kiocb->ki_pos);
	return (zpl_direct_IO_impl(iov_iter_rw(iter), kiocb, iter));
}
#elif defined(HAVE_VFS_DIRECT_IO_ITER_RW_OFFSET)
static ssize_t
zpl_direct_IO(int rw, struct kiocb *kiocb, struct iov_iter *iter, loff_t pos)
{
	ASSERT3S(pos, ==, kiocb->ki_pos);
	return (zpl_direct_IO_impl(rw, kiocb, iter));
}
#else
#error "Unknown direct IO interface"
#endif
#else /* HAVE_VFS_RW_ITERATE */

#if defined(HAVE_VFS_DIRECT_IO_IOVEC)
static ssize_t
zpl_direct_IO(int rw, struct kiocb *kiocb, const struct iovec *iov,
    loff_t pos, unsigned long nr_segs)
{
	if (rw == WRITE)
		return (zpl_aio_write(kiocb, iov, nr_segs, pos));
	else
		return (zpl_aio_read(kiocb, iov, nr_segs, pos));
}
#elif defined(HAVE_VFS_DIRECT_IO_ITER_RW_OFFSET)
static ssize_t
zpl_direct_IO(int rw, struct kiocb *kiocb, struct iov_iter *iter, loff_t pos)
{
	const struct iovec *iovp = iov_iter_iovec(iter);
	unsigned long nr_segs = iter->nr_segs;

	ASSERT3S(pos, ==, kiocb->ki_pos);
	if (rw == WRITE)
		return (zpl_aio_write(kiocb, iovp, nr_segs, pos));
	else
		return (zpl_aio_read(kiocb, iovp, nr_segs, pos));
}
#else
#error "Unknown direct IO interface"
#endif

#endif /* HAVE_VFS_RW_ITERATE */
static loff_t
zpl_llseek(struct file *filp, loff_t offset, int whence)
{
#if defined(SEEK_HOLE) && defined(SEEK_DATA)
	fstrans_cookie_t cookie;

	if (whence == SEEK_DATA || whence == SEEK_HOLE) {
		struct inode *ip = filp->f_mapping->host;
		loff_t maxbytes = ip->i_sb->s_maxbytes;
		loff_t error;

		spl_inode_lock_shared(ip);
		cookie = spl_fstrans_mark();
		error = -zfs_holey(ITOZ(ip), whence, &offset);
		spl_fstrans_unmark(cookie);
		if (error == 0)
			error = lseek_execute(filp, ip, offset, maxbytes);
		spl_inode_unlock_shared(ip);

		return (error);
	}
#endif /* SEEK_HOLE && SEEK_DATA */

	return (generic_file_llseek(filp, offset, whence));
}
/*
 * It's worth taking a moment to describe how mmap is implemented
 * for zfs because it differs considerably from other Linux filesystems.
 * However, this issue is handled the same way under OpenSolaris.
 *
 * The issue is that by design zfs bypasses the Linux page cache and
 * leaves all caching up to the ARC.  This has been shown to work
 * well for the common read(2)/write(2) case.  However, mmap(2)
 * is a problem because it relies on being tightly integrated with the
 * page cache.  To handle this we cache mmap'ed files twice, once in
 * the ARC and a second time in the page cache.  The code is careful
 * to keep both copies synchronized.
 *
 * When a file with an mmap'ed region is written to using write(2)
 * both the data in the ARC and existing pages in the page cache
 * are updated.  For a read(2) data will be read first from the page
 * cache then the ARC if needed.  Neither a write(2) nor a read(2)
 * will ever result in new pages being added to the page cache.
 *
 * New pages are added to the page cache only via .readpage() which
 * is called when the vfs needs to read a page off disk to back the
 * virtual memory region.  These pages may be modified without
 * notifying the ARC and will be written out periodically via
 * .writepage().  This will occur due to either a sync or the usual
 * page aging behavior.  Note that because a read(2) of an mmap'ed file
 * always checks the page cache first, correct data will be returned
 * even when the ARC is out of date.
 *
 * While this implementation ensures correct behavior it does have
 * some drawbacks.  The most obvious is that it increases the memory
 * footprint required when accessing mmap'ed files.  It also adds
 * additional complexity to the code that keeps both caches
 * synchronized.
 *
 * Longer term it may be possible to cleanly resolve this wart by
 * mapping page cache pages directly on to the ARC buffers.  The
 * Linux address space operations are flexible enough to allow
 * selection of which pages back a particular index.  The trick
 * would be working out the details of which subsystem is in
 * charge, the ARC, the page cache, or both.  It may also prove
 * helpful to move the ARC buffers to scatter-gather lists
 * rather than a vmalloc'ed region.
 */
static int
zpl_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct inode *ip = filp->f_mapping->host;
	int error;
	fstrans_cookie_t cookie;

	cookie = spl_fstrans_mark();
	error = -zfs_map(ip, vma->vm_pgoff, (caddr_t *)vma->vm_start,
	    (size_t)(vma->vm_end - vma->vm_start), vma->vm_flags);
	spl_fstrans_unmark(cookie);
	if (error)
		return (error);

	error = generic_file_mmap(filp, vma);
	if (error)
		return (error);

#if !defined(HAVE_FILEMAP_RANGE_HAS_PAGE)
	znode_t *zp = ITOZ(ip);
	mutex_enter(&zp->z_lock);
	zp->z_is_mapped = B_TRUE;
	mutex_exit(&zp->z_lock);
#endif

	return (error);
}
/*
 * Populate a page with data for the Linux page cache.  This function is
 * only used to support mmap(2).  There will be an identical copy of the
 * data in the ARC which is kept up to date via .write() and .writepage().
 */
static inline int
zpl_readpage_common(struct page *pp)
{
	fstrans_cookie_t cookie;

	ASSERT(PageLocked(pp));

	cookie = spl_fstrans_mark();
	int error = -zfs_getpage(pp->mapping->host, pp);
	spl_fstrans_unmark(cookie);

	unlock_page(pp);

	return (error);
}

#ifdef HAVE_VFS_READ_FOLIO
static int
zpl_read_folio(struct file *filp, struct folio *folio)
{
	return (zpl_readpage_common(&folio->page));
}
#else
static int
zpl_readpage(struct file *filp, struct page *pp)
{
	return (zpl_readpage_common(pp));
}
#endif

static int
zpl_readpage_filler(void *data, struct page *pp)
{
	return (zpl_readpage_common(pp));
}
/*
 * Populate a set of pages with data for the Linux page cache.  This
 * function will only be called for read ahead and never for demand
 * paging.  For simplicity, the code relies on read_cache_pages() to
 * correctly lock each page for IO and call zpl_readpage().
 */
#ifdef HAVE_VFS_READPAGES
static int
zpl_readpages(struct file *filp, struct address_space *mapping,
    struct list_head *pages, unsigned nr_pages)
{
	return (read_cache_pages(mapping, pages, zpl_readpage_filler, NULL));
}
#else
static void
zpl_readahead(struct readahead_control *ractl)
{
	struct page *page;

	while ((page = readahead_page(ractl)) != NULL) {
		int ret;

		ret = zpl_readpage_filler(NULL, page);
		put_page(page);
		if (ret)
			break;
	}
}
#endif
static int
zpl_putpage(struct page *pp, struct writeback_control *wbc, void *data)
{
	boolean_t *for_sync = data;
	fstrans_cookie_t cookie;

	ASSERT(PageLocked(pp));
	ASSERT(!PageWriteback(pp));

	cookie = spl_fstrans_mark();
	(void) zfs_putpage(pp->mapping->host, pp, wbc, *for_sync);
	spl_fstrans_unmark(cookie);

	return (0);
}

#ifdef HAVE_WRITEPAGE_T_FOLIO
static int
zpl_putfolio(struct folio *pp, struct writeback_control *wbc, void *data)
{
	(void) zpl_putpage(&pp->page, wbc, data);
	return (0);
}
#endif
static inline int
zpl_write_cache_pages(struct address_space *mapping,
    struct writeback_control *wbc, void *data)
{
	int result;

#ifdef HAVE_WRITEPAGE_T_FOLIO
	result = write_cache_pages(mapping, wbc, zpl_putfolio, data);
#else
	result = write_cache_pages(mapping, wbc, zpl_putpage, data);
#endif
	return (result);
}
static int
zpl_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	znode_t *zp = ITOZ(mapping->host);
	zfsvfs_t *zfsvfs = ITOZSB(mapping->host);
	enum writeback_sync_modes sync_mode;
	int result;

	if ((result = zpl_enter(zfsvfs, FTAG)) != 0)
		return (result);
	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		wbc->sync_mode = WB_SYNC_ALL;
	zpl_exit(zfsvfs, FTAG);
	sync_mode = wbc->sync_mode;

	/*
	 * We don't want to run write_cache_pages() in SYNC mode here, because
	 * that would make putpage() wait for a single page to be committed to
	 * disk every single time, resulting in atrocious performance.  Instead
	 * we run it once in non-SYNC mode so that the ZIL gets all the data,
	 * and then we commit it all in one go.
	 */
	boolean_t for_sync = (sync_mode == WB_SYNC_ALL);
	wbc->sync_mode = WB_SYNC_NONE;
	result = zpl_write_cache_pages(mapping, wbc, &for_sync);
	if (sync_mode != wbc->sync_mode) {
		if ((result = zpl_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
			return (result);
		if (zfsvfs->z_log != NULL)
			zil_commit(zfsvfs->z_log, zp->z_id);
		zpl_exit(zfsvfs, FTAG);

		/*
		 * We need to call write_cache_pages() again (we can't just
		 * return after the commit) because the previous call in
		 * non-SYNC mode does not guarantee that we got all the dirty
		 * pages (see the implementation of write_cache_pages() for
		 * details).  That being said, this is a no-op in most cases.
		 */
		wbc->sync_mode = sync_mode;
		result = zpl_write_cache_pages(mapping, wbc, &for_sync);
	}
	return (result);
}
/*
 * Write out dirty pages to the ARC, this function is only required to
 * support mmap(2).  Mapped pages may be dirtied by memory operations
 * which never call .write().  These dirty pages are kept in sync with
 * the ARC buffers via this hook.
 */
static int
zpl_writepage(struct page *pp, struct writeback_control *wbc)
{
	if (ITOZSB(pp->mapping->host)->z_os->os_sync == ZFS_SYNC_ALWAYS)
		wbc->sync_mode = WB_SYNC_ALL;

	boolean_t for_sync = (wbc->sync_mode == WB_SYNC_ALL);

	return (zpl_putpage(pp, wbc, &for_sync));
}
/*
 * The flag combination which matches the behavior of zfs_space() is
 * FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE.  The FALLOC_FL_PUNCH_HOLE
 * flag was introduced in the 2.6.38 kernel.
 *
 * The original mode=0 (allocate space) behavior can be reasonably emulated
 * by checking if enough space exists and creating a sparse file, as real
 * persistent space reservation is not possible due to COW, snapshots, etc.
 */
static long
zpl_fallocate_common(struct inode *ip, int mode, loff_t offset, loff_t len)
{
	cred_t *cr = CRED();
	loff_t olen;
	fstrans_cookie_t cookie;
	int error = 0;

	int test_mode = FALLOC_FL_PUNCH_HOLE;
#ifdef HAVE_FALLOC_FL_ZERO_RANGE
	test_mode |= FALLOC_FL_ZERO_RANGE;
#endif

	if ((mode & ~(FALLOC_FL_KEEP_SIZE | test_mode)) != 0)
		return (-EOPNOTSUPP);

	if (offset < 0 || len <= 0)
		return (-EINVAL);

	spl_inode_lock(ip);
	olen = i_size_read(ip);

	crhold(cr);
	cookie = spl_fstrans_mark();
	if (mode & (test_mode)) {
		flock64_t bf;

		if (mode & FALLOC_FL_KEEP_SIZE) {
			if (offset > olen)
				goto out_unmark;

			if (offset + len > olen)
				len = olen - offset;
		}
		bf.l_type = F_WRLCK;
		bf.l_whence = SEEK_SET;
		bf.l_start = offset;
		bf.l_len = len;
		bf.l_pid = 0;

		error = -zfs_space(ITOZ(ip), F_FREESP, &bf, O_RDWR, offset, cr);
	} else if ((mode & ~FALLOC_FL_KEEP_SIZE) == 0) {
		unsigned int percent = zfs_fallocate_reserve_percent;
		struct kstatfs statfs;

		/* Legacy mode, disable fallocate compatibility. */
		if (percent == 0) {
			error = -EOPNOTSUPP;
			goto out_unmark;
		}

		/*
		 * Use zfs_statvfs() instead of dmu_objset_space() since it
		 * also checks project quota limits, which are relevant here.
		 */
		error = zfs_statvfs(ip, &statfs);
		if (error)
			goto out_unmark;

		/*
		 * Shrink available space a bit to account for overhead/races.
		 * We know the product previously fit into availbytes from
		 * dmu_objset_space(), so the smaller product will also fit.
		 */
		if (len > statfs.f_bavail * (statfs.f_bsize * 100 / percent)) {
			error = -ENOSPC;
			goto out_unmark;
		}
		if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > olen)
			error = zfs_freesp(ITOZ(ip), offset + len, 0, 0, FALSE);
	}
out_unmark:
	spl_fstrans_unmark(cookie);
	spl_inode_unlock(ip);
	crfree(cr);

	return (error);
}
static long
zpl_fallocate(struct file *filp, int mode, loff_t offset, loff_t len)
{
	return zpl_fallocate_common(file_inode(filp),
	    mode, offset, len);
}
static int
zpl_ioctl_getversion(struct file *filp, void __user *arg)
{
	uint32_t generation = file_inode(filp)->i_generation;

	return (copy_to_user(arg, &generation, sizeof (generation)));
}
#ifdef HAVE_FILE_FADVISE
static int
zpl_fadvise(struct file *filp, loff_t offset, loff_t len, int advice)
{
	struct inode *ip = file_inode(filp);
	znode_t *zp = ITOZ(ip);
	zfsvfs_t *zfsvfs = ITOZSB(ip);
	objset_t *os = zfsvfs->z_os;
	int error = 0;

	if (S_ISFIFO(ip->i_mode))
		return (-ESPIPE);

	if (offset < 0 || len < 0)
		return (-EINVAL);

	if ((error = zpl_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
		return (error);

	switch (advice) {
	case POSIX_FADV_SEQUENTIAL:
	case POSIX_FADV_WILLNEED:
#ifdef HAVE_GENERIC_FADVISE
		if (zn_has_cached_data(zp, offset, offset + len - 1))
			error = generic_fadvise(filp, offset, len, advice);
#endif
		/*
		 * Pass on the caller's size directly, but note that
		 * dmu_prefetch_max will effectively cap it.  If there
		 * really is a larger sequential access pattern, perhaps
		 * dmu_zfetch will detect it.
		 */
		if (len == 0)
			len = i_size_read(ip) - offset;

		dmu_prefetch(os, zp->z_id, 0, offset, len,
		    ZIO_PRIORITY_ASYNC_READ);
		break;
	case POSIX_FADV_NORMAL:
	case POSIX_FADV_RANDOM:
	case POSIX_FADV_DONTNEED:
	case POSIX_FADV_NOREUSE:
		/* ignored for now */
		break;
	default:
		error = -EINVAL;
		break;
	}

	zfs_exit(zfsvfs, FTAG);

	return (error);
}
#endif /* HAVE_FILE_FADVISE */
#define	ZFS_FL_USER_VISIBLE	(FS_FL_USER_VISIBLE | ZFS_PROJINHERIT_FL)
#define	ZFS_FL_USER_MODIFIABLE	(FS_FL_USER_MODIFIABLE | ZFS_PROJINHERIT_FL)

static uint32_t
__zpl_ioctl_getflags(struct inode *ip)
{
	uint64_t zfs_flags = ITOZ(ip)->z_pflags;
	uint32_t ioctl_flags = 0;

	if (zfs_flags & ZFS_IMMUTABLE)
		ioctl_flags |= FS_IMMUTABLE_FL;

	if (zfs_flags & ZFS_APPENDONLY)
		ioctl_flags |= FS_APPEND_FL;

	if (zfs_flags & ZFS_NODUMP)
		ioctl_flags |= FS_NODUMP_FL;

	if (zfs_flags & ZFS_PROJINHERIT)
		ioctl_flags |= ZFS_PROJINHERIT_FL;

	return (ioctl_flags & ZFS_FL_USER_VISIBLE);
}
/*
 * Map zfs file z_pflags (xvattr_t) to Linux file attributes.  Only file
 * attributes common to both Linux and Solaris are mapped.
 */
static int
zpl_ioctl_getflags(struct file *filp, void __user *arg)
{
	uint32_t flags;
	int err;

	flags = __zpl_ioctl_getflags(file_inode(filp));
	err = copy_to_user(arg, &flags, sizeof (flags));

	return (err);
}
/*
 * fchange() is a helper macro to detect if we have been asked to change a
 * flag.  This is ugly, but the requirement that we do this is a consequence
 * of how the Linux file attribute interface was designed.  Another
 * consequence is that concurrent modification of files suffers from a TOCTOU
 * race.  Neither are things we can fix without modifying the kernel-userland
 * interface, which is outside of our jurisdiction.
 */
#define	fchange(f0, f1, b0, b1) (!((f0) & (b0)) != !((f1) & (b1)))
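
/*
 * For example, fchange(ioctl_flags, zfs_flags, FS_APPEND_FL, ZFS_APPENDONLY)
 * is non-zero exactly when the FS_APPEND_FL bit requested by userspace
 * differs from the current ZFS_APPENDONLY state, i.e. when the caller is
 * actually asking for a change rather than restating the existing value.
 */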
static int
__zpl_ioctl_setflags(struct inode *ip, uint32_t ioctl_flags, xvattr_t *xva)
{
	uint64_t zfs_flags = ITOZ(ip)->z_pflags;
	xoptattr_t *xoap;

	if (ioctl_flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | FS_NODUMP_FL |
	    ZFS_PROJINHERIT_FL))
		return (-EOPNOTSUPP);

	if (ioctl_flags & ~ZFS_FL_USER_MODIFIABLE)
		return (-EACCES);

	if ((fchange(ioctl_flags, zfs_flags, FS_IMMUTABLE_FL, ZFS_IMMUTABLE) ||
	    fchange(ioctl_flags, zfs_flags, FS_APPEND_FL, ZFS_APPENDONLY)) &&
	    !capable(CAP_LINUX_IMMUTABLE))
		return (-EPERM);

	if (!zpl_inode_owner_or_capable(zfs_init_idmap, ip))
		return (-EACCES);

	xva_init(xva);
	xoap = xva_getxoptattr(xva);
	ASSERT(xoap);

#define	FLAG_CHANGE(iflag, zflag, xflag, xfield)	do {	\
	if (((ioctl_flags & (iflag)) && !(zfs_flags & (zflag))) ||	\
	    ((zfs_flags & (zflag)) && !(ioctl_flags & (iflag)))) {	\
		XVA_SET_REQ(xva, (xflag));	\
		(xfield) = ((ioctl_flags & (iflag)) != 0);	\
	}	\
} while (0)

	FLAG_CHANGE(FS_IMMUTABLE_FL, ZFS_IMMUTABLE, XAT_IMMUTABLE,
	    xoap->xoa_immutable);
	FLAG_CHANGE(FS_APPEND_FL, ZFS_APPENDONLY, XAT_APPENDONLY,
	    xoap->xoa_appendonly);
	FLAG_CHANGE(FS_NODUMP_FL, ZFS_NODUMP, XAT_NODUMP,
	    xoap->xoa_nodump);
	FLAG_CHANGE(ZFS_PROJINHERIT_FL, ZFS_PROJINHERIT, XAT_PROJINHERIT,
	    xoap->xoa_projinherit);

#undef	FLAG_CHANGE

	return (0);
}
static int
zpl_ioctl_setflags(struct file *filp, void __user *arg)
{
	struct inode *ip = file_inode(filp);
	uint32_t flags;
	cred_t *cr = CRED();
	xvattr_t xva;
	int err;
	fstrans_cookie_t cookie;

	if (copy_from_user(&flags, arg, sizeof (flags)))
		return (-EFAULT);

	err = __zpl_ioctl_setflags(ip, flags, &xva);
	if (err)
		return (err);

	crhold(cr);
	cookie = spl_fstrans_mark();
	err = -zfs_setattr(ITOZ(ip), (vattr_t *)&xva, 0, cr, zfs_init_idmap);
	spl_fstrans_unmark(cookie);
	crfree(cr);

	return (err);
}
static int
zpl_ioctl_getxattr(struct file *filp, void __user *arg)
{
	zfsxattr_t fsx = { 0 };
	struct inode *ip = file_inode(filp);
	int err;

	fsx.fsx_xflags = __zpl_ioctl_getflags(ip);
	fsx.fsx_projid = ITOZ(ip)->z_projid;
	err = copy_to_user(arg, &fsx, sizeof (fsx));

	return (err);
}
static int
zpl_ioctl_setxattr(struct file *filp, void __user *arg)
{
	struct inode *ip = file_inode(filp);
	zfsxattr_t fsx;
	cred_t *cr = CRED();
	xvattr_t xva;
	xoptattr_t *xoap;
	int err;
	fstrans_cookie_t cookie;

	if (copy_from_user(&fsx, arg, sizeof (fsx)))
		return (-EFAULT);

	if (!zpl_is_valid_projid(fsx.fsx_projid))
		return (-EINVAL);

	err = __zpl_ioctl_setflags(ip, fsx.fsx_xflags, &xva);
	if (err)
		return (err);

	xoap = xva_getxoptattr(&xva);
	XVA_SET_REQ(&xva, XAT_PROJID);
	xoap->xoa_projid = fsx.fsx_projid;

	crhold(cr);
	cookie = spl_fstrans_mark();
	err = -zfs_setattr(ITOZ(ip), (vattr_t *)&xva, 0, cr, zfs_init_idmap);
	spl_fstrans_unmark(cookie);
	crfree(cr);

	return (err);
}
/*
 * Expose Additional File Level Attributes of ZFS.
 */
static int
zpl_ioctl_getdosflags(struct file *filp, void __user *arg)
{
	struct inode *ip = file_inode(filp);
	uint64_t dosflags = ITOZ(ip)->z_pflags;
	dosflags &= ZFS_DOS_FL_USER_VISIBLE;
	int err = copy_to_user(arg, &dosflags, sizeof (dosflags));

	return (err);
}
static int
__zpl_ioctl_setdosflags(struct inode *ip, uint64_t ioctl_flags, xvattr_t *xva)
{
	uint64_t zfs_flags = ITOZ(ip)->z_pflags;
	xoptattr_t *xoap;

	if (ioctl_flags & (~ZFS_DOS_FL_USER_VISIBLE))
		return (-EOPNOTSUPP);

	if ((fchange(ioctl_flags, zfs_flags, ZFS_IMMUTABLE, ZFS_IMMUTABLE) ||
	    fchange(ioctl_flags, zfs_flags, ZFS_APPENDONLY, ZFS_APPENDONLY)) &&
	    !capable(CAP_LINUX_IMMUTABLE))
		return (-EPERM);

	if (!zpl_inode_owner_or_capable(zfs_init_idmap, ip))
		return (-EACCES);

	xva_init(xva);
	xoap = xva_getxoptattr(xva);
	ASSERT(xoap);

#define	FLAG_CHANGE(iflag, xflag, xfield)	do {	\
	if (((ioctl_flags & (iflag)) && !(zfs_flags & (iflag))) ||	\
	    ((zfs_flags & (iflag)) && !(ioctl_flags & (iflag)))) {	\
		XVA_SET_REQ(xva, (xflag));	\
		(xfield) = ((ioctl_flags & (iflag)) != 0);	\
	}	\
} while (0)

	FLAG_CHANGE(ZFS_IMMUTABLE, XAT_IMMUTABLE, xoap->xoa_immutable);
	FLAG_CHANGE(ZFS_APPENDONLY, XAT_APPENDONLY, xoap->xoa_appendonly);
	FLAG_CHANGE(ZFS_NODUMP, XAT_NODUMP, xoap->xoa_nodump);
	FLAG_CHANGE(ZFS_READONLY, XAT_READONLY, xoap->xoa_readonly);
	FLAG_CHANGE(ZFS_HIDDEN, XAT_HIDDEN, xoap->xoa_hidden);
	FLAG_CHANGE(ZFS_SYSTEM, XAT_SYSTEM, xoap->xoa_system);
	FLAG_CHANGE(ZFS_ARCHIVE, XAT_ARCHIVE, xoap->xoa_archive);
	FLAG_CHANGE(ZFS_NOUNLINK, XAT_NOUNLINK, xoap->xoa_nounlink);
	FLAG_CHANGE(ZFS_REPARSE, XAT_REPARSE, xoap->xoa_reparse);
	FLAG_CHANGE(ZFS_OFFLINE, XAT_OFFLINE, xoap->xoa_offline);
	FLAG_CHANGE(ZFS_SPARSE, XAT_SPARSE, xoap->xoa_sparse);

#undef	FLAG_CHANGE

	return (0);
}
/*
 * Set Additional File Level Attributes of ZFS.
 */
static int
zpl_ioctl_setdosflags(struct file *filp, void __user *arg)
{
	struct inode *ip = file_inode(filp);
	uint64_t dosflags;
	cred_t *cr = CRED();
	xvattr_t xva;
	int err;
	fstrans_cookie_t cookie;

	if (copy_from_user(&dosflags, arg, sizeof (dosflags)))
		return (-EFAULT);

	err = __zpl_ioctl_setdosflags(ip, dosflags, &xva);
	if (err)
		return (err);

	crhold(cr);
	cookie = spl_fstrans_mark();
	err = -zfs_setattr(ITOZ(ip), (vattr_t *)&xva, 0, cr, zfs_init_idmap);
	spl_fstrans_unmark(cookie);
	crfree(cr);

	return (err);
}
static long
zpl_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case FS_IOC_GETVERSION:
		return (zpl_ioctl_getversion(filp, (void *)arg));
	case FS_IOC_GETFLAGS:
		return (zpl_ioctl_getflags(filp, (void *)arg));
	case FS_IOC_SETFLAGS:
		return (zpl_ioctl_setflags(filp, (void *)arg));
	case ZFS_IOC_FSGETXATTR:
		return (zpl_ioctl_getxattr(filp, (void *)arg));
	case ZFS_IOC_FSSETXATTR:
		return (zpl_ioctl_setxattr(filp, (void *)arg));
	case ZFS_IOC_GETDOSFLAGS:
		return (zpl_ioctl_getdosflags(filp, (void *)arg));
	case ZFS_IOC_SETDOSFLAGS:
		return (zpl_ioctl_setdosflags(filp, (void *)arg));
	default:
		return (-ENOTTY);
	}
}
#ifdef CONFIG_COMPAT
static long
zpl_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case FS_IOC32_GETVERSION:
		cmd = FS_IOC_GETVERSION;
		break;
	case FS_IOC32_GETFLAGS:
		cmd = FS_IOC_GETFLAGS;
		break;
	case FS_IOC32_SETFLAGS:
		cmd = FS_IOC_SETFLAGS;
		break;
	default:
		return (-ENOTTY);
	}
	return (zpl_ioctl(filp, cmd, (unsigned long)compat_ptr(arg)));
}
#endif /* CONFIG_COMPAT */
const struct address_space_operations zpl_address_space_operations = {
#ifdef HAVE_VFS_READPAGES
	.readpages	= zpl_readpages,
#else
	.readahead	= zpl_readahead,
#endif
#ifdef HAVE_VFS_READ_FOLIO
	.read_folio	= zpl_read_folio,
#else
	.readpage	= zpl_readpage,
#endif
	.writepage	= zpl_writepage,
	.writepages	= zpl_writepages,
	.direct_IO	= zpl_direct_IO,
#ifdef HAVE_VFS_SET_PAGE_DIRTY_NOBUFFERS
	.set_page_dirty	= __set_page_dirty_nobuffers,
#endif
#ifdef HAVE_VFS_FILEMAP_DIRTY_FOLIO
	.dirty_folio	= filemap_dirty_folio,
#endif
};
const struct file_operations zpl_file_operations = {
	.open		= zpl_open,
	.release	= zpl_release,
	.llseek		= zpl_llseek,
#ifdef HAVE_VFS_RW_ITERATE
#ifdef HAVE_NEW_SYNC_READ
	.read		= new_sync_read,
	.write		= new_sync_write,
#endif
	.read_iter	= zpl_iter_read,
	.write_iter	= zpl_iter_write,
#ifdef HAVE_VFS_IOV_ITER
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
#endif
#else
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= zpl_aio_read,
	.aio_write	= zpl_aio_write,
#endif
	.mmap		= zpl_mmap,
	.fsync		= zpl_fsync,
#ifdef HAVE_FILE_AIO_FSYNC
	.aio_fsync	= zpl_aio_fsync,
#endif
	.fallocate	= zpl_fallocate,
#ifdef HAVE_VFS_COPY_FILE_RANGE
	.copy_file_range	= zpl_copy_file_range,
#endif
#ifdef HAVE_VFS_REMAP_FILE_RANGE
	.remap_file_range	= zpl_remap_file_range,
#endif
#ifdef HAVE_VFS_CLONE_FILE_RANGE
	.clone_file_range	= zpl_clone_file_range,
#endif
#ifdef HAVE_VFS_DEDUPE_FILE_RANGE
	.dedupe_file_range	= zpl_dedupe_file_range,
#endif
#ifdef HAVE_FILE_FADVISE
	.fadvise	= zpl_fadvise,
#endif
	.unlocked_ioctl	= zpl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= zpl_compat_ioctl,
#endif
};
const struct file_operations zpl_dir_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
#if defined(HAVE_VFS_ITERATE_SHARED)
	.iterate_shared	= zpl_iterate,
#elif defined(HAVE_VFS_ITERATE)
	.iterate	= zpl_iterate,
#else
	.readdir	= zpl_readdir,
#endif
	.fsync		= zpl_fsync,
	.unlocked_ioctl	= zpl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= zpl_compat_ioctl,
#endif
};
module_param(zfs_fallocate_reserve_percent, uint, 0644);
MODULE_PARM_DESC(zfs_fallocate_reserve_percent,
	"Percentage of length to use for the available capacity check");
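
/*
 * The tunable is exposed through the standard module parameter interface;
 * for example, it can typically be adjusted at runtime via
 * /sys/module/zfs/parameters/zfs_fallocate_reserve_percent (assuming the
 * usual sysfs location for zfs module parameters on the target system).
 */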